ZTWHHH commited on
Commit
287222b
·
verified ·
1 Parent(s): 201c9f5

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. parrot/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 +3 -0
  3. vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so +3 -0
  4. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
  5. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc +0 -0
  6. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc +0 -0
  7. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc +0 -0
  8. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc +0 -0
  9. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc +0 -0
  10. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc +0 -0
  11. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc +0 -0
  12. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc +0 -0
  13. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc +0 -0
  14. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc +0 -0
  15. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/api.py +85 -0
  16. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/base.py +583 -0
  17. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/common.py +1748 -0
  18. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/concat.py +348 -0
  19. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py +2348 -0
  20. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/inference.py +437 -0
  21. vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/missing.py +810 -0
  22. vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__init__.py +15 -0
  23. vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/generic.py +2852 -0
  24. vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/groupby.py +0 -0
  25. vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/grouper.py +1102 -0
  26. vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/indexing.py +304 -0
  27. vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/ops.py +1208 -0
  28. vlmpy310/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc +0 -0
  29. vlmpy310/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc +0 -0
  30. vlmpy310/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc +0 -0
  31. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__init__.py +0 -0
  32. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc +0 -0
  33. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc +0 -0
  34. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc +0 -0
  35. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc +0 -0
  36. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc +0 -0
  37. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc +0 -0
  38. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc +0 -0
  39. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc +0 -0
  40. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc +0 -0
  41. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc +0 -0
  42. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/accessors.py +643 -0
  43. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/api.py +388 -0
  44. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/base.py +0 -0
  45. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/category.py +513 -0
  46. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/datetimelike.py +843 -0
  47. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/datetimes.py +1127 -0
  48. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/extension.py +172 -0
  49. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/frozen.py +120 -0
  50. vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/interval.py +1136 -0
.gitattributes CHANGED
@@ -1242,3 +1242,5 @@ vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.cpython-310-
1242
  vlmpy310/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1243
  vlmpy310/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1244
  vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
1242
  vlmpy310/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1243
  vlmpy310/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1244
  vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1245
+ vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1246
+ parrot/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8278dcc6632df94762737b1c930050075738affba25e73cb1cac1b448472dc06
3
+ size 232685936
vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23bc30a2cb98e39d577634d9f4473ca93bc84d4e563dc8a06a050e41550333f6
3
+ size 1175424
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc ADDED
Binary file (1.27 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc ADDED
Binary file (6.7 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc ADDED
Binary file (18.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc ADDED
Binary file (39 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc ADDED
Binary file (42 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc ADDED
Binary file (62.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc ADDED
Binary file (3.21 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc ADDED
Binary file (9.54 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc ADDED
Binary file (19.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/api.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas.core.dtypes.common import (
2
+ is_any_real_numeric_dtype,
3
+ is_array_like,
4
+ is_bool,
5
+ is_bool_dtype,
6
+ is_categorical_dtype,
7
+ is_complex,
8
+ is_complex_dtype,
9
+ is_datetime64_any_dtype,
10
+ is_datetime64_dtype,
11
+ is_datetime64_ns_dtype,
12
+ is_datetime64tz_dtype,
13
+ is_dict_like,
14
+ is_dtype_equal,
15
+ is_extension_array_dtype,
16
+ is_file_like,
17
+ is_float,
18
+ is_float_dtype,
19
+ is_hashable,
20
+ is_int64_dtype,
21
+ is_integer,
22
+ is_integer_dtype,
23
+ is_interval,
24
+ is_interval_dtype,
25
+ is_iterator,
26
+ is_list_like,
27
+ is_named_tuple,
28
+ is_number,
29
+ is_numeric_dtype,
30
+ is_object_dtype,
31
+ is_period_dtype,
32
+ is_re,
33
+ is_re_compilable,
34
+ is_scalar,
35
+ is_signed_integer_dtype,
36
+ is_sparse,
37
+ is_string_dtype,
38
+ is_timedelta64_dtype,
39
+ is_timedelta64_ns_dtype,
40
+ is_unsigned_integer_dtype,
41
+ pandas_dtype,
42
+ )
43
+
44
+ __all__ = [
45
+ "is_any_real_numeric_dtype",
46
+ "is_array_like",
47
+ "is_bool",
48
+ "is_bool_dtype",
49
+ "is_categorical_dtype",
50
+ "is_complex",
51
+ "is_complex_dtype",
52
+ "is_datetime64_any_dtype",
53
+ "is_datetime64_dtype",
54
+ "is_datetime64_ns_dtype",
55
+ "is_datetime64tz_dtype",
56
+ "is_dict_like",
57
+ "is_dtype_equal",
58
+ "is_extension_array_dtype",
59
+ "is_file_like",
60
+ "is_float",
61
+ "is_float_dtype",
62
+ "is_hashable",
63
+ "is_int64_dtype",
64
+ "is_integer",
65
+ "is_integer_dtype",
66
+ "is_interval",
67
+ "is_interval_dtype",
68
+ "is_iterator",
69
+ "is_list_like",
70
+ "is_named_tuple",
71
+ "is_number",
72
+ "is_numeric_dtype",
73
+ "is_object_dtype",
74
+ "is_period_dtype",
75
+ "is_re",
76
+ "is_re_compilable",
77
+ "is_scalar",
78
+ "is_signed_integer_dtype",
79
+ "is_sparse",
80
+ "is_string_dtype",
81
+ "is_timedelta64_dtype",
82
+ "is_timedelta64_ns_dtype",
83
+ "is_unsigned_integer_dtype",
84
+ "pandas_dtype",
85
+ ]
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/base.py ADDED
@@ -0,0 +1,583 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Extend pandas with custom array types.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ TypeVar,
10
+ cast,
11
+ overload,
12
+ )
13
+
14
+ import numpy as np
15
+
16
+ from pandas._libs import missing as libmissing
17
+ from pandas._libs.hashtable import object_hash
18
+ from pandas._libs.properties import cache_readonly
19
+ from pandas.errors import AbstractMethodError
20
+
21
+ from pandas.core.dtypes.generic import (
22
+ ABCDataFrame,
23
+ ABCIndex,
24
+ ABCSeries,
25
+ )
26
+
27
+ if TYPE_CHECKING:
28
+ from pandas._typing import (
29
+ DtypeObj,
30
+ Self,
31
+ Shape,
32
+ npt,
33
+ type_t,
34
+ )
35
+
36
+ from pandas import Index
37
+ from pandas.core.arrays import ExtensionArray
38
+
39
+ # To parameterize on same ExtensionDtype
40
+ ExtensionDtypeT = TypeVar("ExtensionDtypeT", bound="ExtensionDtype")
41
+
42
+
43
+ class ExtensionDtype:
44
+ """
45
+ A custom data type, to be paired with an ExtensionArray.
46
+
47
+ See Also
48
+ --------
49
+ extensions.register_extension_dtype: Register an ExtensionType
50
+ with pandas as class decorator.
51
+ extensions.ExtensionArray: Abstract base class for custom 1-D array types.
52
+
53
+ Notes
54
+ -----
55
+ The interface includes the following abstract methods that must
56
+ be implemented by subclasses:
57
+
58
+ * type
59
+ * name
60
+ * construct_array_type
61
+
62
+ The following attributes and methods influence the behavior of the dtype in
63
+ pandas operations
64
+
65
+ * _is_numeric
66
+ * _is_boolean
67
+ * _get_common_dtype
68
+
69
+ The `na_value` class attribute can be used to set the default NA value
70
+ for this type. :attr:`numpy.nan` is used by default.
71
+
72
+ ExtensionDtypes are required to be hashable. The base class provides
73
+ a default implementation, which relies on the ``_metadata`` class
74
+ attribute. ``_metadata`` should be a tuple containing the strings
75
+ that define your data type. For example, with ``PeriodDtype`` that's
76
+ the ``freq`` attribute.
77
+
78
+ **If you have a parametrized dtype you should set the ``_metadata``
79
+ class property**.
80
+
81
+ Ideally, the attributes in ``_metadata`` will match the
82
+ parameters to your ``ExtensionDtype.__init__`` (if any). If any of
83
+ the attributes in ``_metadata`` don't implement the standard
84
+ ``__eq__`` or ``__hash__``, the default implementations here will not
85
+ work.
86
+
87
+ Examples
88
+ --------
89
+
90
+ For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
91
+ can be implemented: this method receives a pyarrow Array or ChunkedArray
92
+ as only argument and is expected to return the appropriate pandas
93
+ ExtensionArray for this dtype and the passed values:
94
+
95
+ >>> import pyarrow
96
+ >>> from pandas.api.extensions import ExtensionArray
97
+ >>> class ExtensionDtype:
98
+ ... def __from_arrow__(
99
+ ... self,
100
+ ... array: pyarrow.Array | pyarrow.ChunkedArray
101
+ ... ) -> ExtensionArray:
102
+ ... ...
103
+
104
+ This class does not inherit from 'abc.ABCMeta' for performance reasons.
105
+ Methods and properties required by the interface raise
106
+ ``pandas.errors.AbstractMethodError`` and no ``register`` method is
107
+ provided for registering virtual subclasses.
108
+ """
109
+
110
+ _metadata: tuple[str, ...] = ()
111
+
112
+ def __str__(self) -> str:
113
+ return self.name
114
+
115
+ def __eq__(self, other: object) -> bool:
116
+ """
117
+ Check whether 'other' is equal to self.
118
+
119
+ By default, 'other' is considered equal if either
120
+
121
+ * it's a string matching 'self.name'.
122
+ * it's an instance of this type and all of the attributes
123
+ in ``self._metadata`` are equal between `self` and `other`.
124
+
125
+ Parameters
126
+ ----------
127
+ other : Any
128
+
129
+ Returns
130
+ -------
131
+ bool
132
+ """
133
+ if isinstance(other, str):
134
+ try:
135
+ other = self.construct_from_string(other)
136
+ except TypeError:
137
+ return False
138
+ if isinstance(other, type(self)):
139
+ return all(
140
+ getattr(self, attr) == getattr(other, attr) for attr in self._metadata
141
+ )
142
+ return False
143
+
144
+ def __hash__(self) -> int:
145
+ # for python>=3.10, different nan objects have different hashes
146
+ # we need to avoid that and thus use hash function with old behavior
147
+ return object_hash(tuple(getattr(self, attr) for attr in self._metadata))
148
+
149
+ def __ne__(self, other: object) -> bool:
150
+ return not self.__eq__(other)
151
+
152
+ @property
153
+ def na_value(self) -> object:
154
+ """
155
+ Default NA value to use for this type.
156
+
157
+ This is used in e.g. ExtensionArray.take. This should be the
158
+ user-facing "boxed" version of the NA value, not the physical NA value
159
+ for storage. e.g. for JSONArray, this is an empty dictionary.
160
+ """
161
+ return np.nan
162
+
163
+ @property
164
+ def type(self) -> type_t[Any]:
165
+ """
166
+ The scalar type for the array, e.g. ``int``
167
+
168
+ It's expected ``ExtensionArray[item]`` returns an instance
169
+ of ``ExtensionDtype.type`` for scalar ``item``, assuming
170
+ that value is valid (not NA). NA values do not need to be
171
+ instances of `type`.
172
+ """
173
+ raise AbstractMethodError(self)
174
+
175
+ @property
176
+ def kind(self) -> str:
177
+ """
178
+ A character code (one of 'biufcmMOSUV'), default 'O'
179
+
180
+ This should match the NumPy dtype used when the array is
181
+ converted to an ndarray, which is probably 'O' for object if
182
+ the extension type cannot be represented as a built-in NumPy
183
+ type.
184
+
185
+ See Also
186
+ --------
187
+ numpy.dtype.kind
188
+ """
189
+ return "O"
190
+
191
+ @property
192
+ def name(self) -> str:
193
+ """
194
+ A string identifying the data type.
195
+
196
+ Will be used for display in, e.g. ``Series.dtype``
197
+ """
198
+ raise AbstractMethodError(self)
199
+
200
+ @property
201
+ def names(self) -> list[str] | None:
202
+ """
203
+ Ordered list of field names, or None if there are no fields.
204
+
205
+ This is for compatibility with NumPy arrays, and may be removed in the
206
+ future.
207
+ """
208
+ return None
209
+
210
+ @classmethod
211
+ def construct_array_type(cls) -> type_t[ExtensionArray]:
212
+ """
213
+ Return the array type associated with this dtype.
214
+
215
+ Returns
216
+ -------
217
+ type
218
+ """
219
+ raise AbstractMethodError(cls)
220
+
221
+ def empty(self, shape: Shape) -> ExtensionArray:
222
+ """
223
+ Construct an ExtensionArray of this dtype with the given shape.
224
+
225
+ Analogous to numpy.empty.
226
+
227
+ Parameters
228
+ ----------
229
+ shape : int or tuple[int]
230
+
231
+ Returns
232
+ -------
233
+ ExtensionArray
234
+ """
235
+ cls = self.construct_array_type()
236
+ return cls._empty(shape, dtype=self)
237
+
238
+ @classmethod
239
+ def construct_from_string(cls, string: str) -> Self:
240
+ r"""
241
+ Construct this type from a string.
242
+
243
+ This is useful mainly for data types that accept parameters.
244
+ For example, a period dtype accepts a frequency parameter that
245
+ can be set as ``period[h]`` (where H means hourly frequency).
246
+
247
+ By default, in the abstract class, just the name of the type is
248
+ expected. But subclasses can overwrite this method to accept
249
+ parameters.
250
+
251
+ Parameters
252
+ ----------
253
+ string : str
254
+ The name of the type, for example ``category``.
255
+
256
+ Returns
257
+ -------
258
+ ExtensionDtype
259
+ Instance of the dtype.
260
+
261
+ Raises
262
+ ------
263
+ TypeError
264
+ If a class cannot be constructed from this 'string'.
265
+
266
+ Examples
267
+ --------
268
+ For extension dtypes with arguments the following may be an
269
+ adequate implementation.
270
+
271
+ >>> import re
272
+ >>> @classmethod
273
+ ... def construct_from_string(cls, string):
274
+ ... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
275
+ ... match = pattern.match(string)
276
+ ... if match:
277
+ ... return cls(**match.groupdict())
278
+ ... else:
279
+ ... raise TypeError(
280
+ ... f"Cannot construct a '{cls.__name__}' from '{string}'"
281
+ ... )
282
+ """
283
+ if not isinstance(string, str):
284
+ raise TypeError(
285
+ f"'construct_from_string' expects a string, got {type(string)}"
286
+ )
287
+ # error: Non-overlapping equality check (left operand type: "str", right
288
+ # operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
289
+ assert isinstance(cls.name, str), (cls, type(cls.name))
290
+ if string != cls.name:
291
+ raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
292
+ return cls()
293
+
294
+ @classmethod
295
+ def is_dtype(cls, dtype: object) -> bool:
296
+ """
297
+ Check if we match 'dtype'.
298
+
299
+ Parameters
300
+ ----------
301
+ dtype : object
302
+ The object to check.
303
+
304
+ Returns
305
+ -------
306
+ bool
307
+
308
+ Notes
309
+ -----
310
+ The default implementation is True if
311
+
312
+ 1. ``cls.construct_from_string(dtype)`` is an instance
313
+ of ``cls``.
314
+ 2. ``dtype`` is an object and is an instance of ``cls``
315
+ 3. ``dtype`` has a ``dtype`` attribute, and any of the above
316
+ conditions is true for ``dtype.dtype``.
317
+ """
318
+ dtype = getattr(dtype, "dtype", dtype)
319
+
320
+ if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):
321
+ # https://github.com/pandas-dev/pandas/issues/22960
322
+ # avoid passing data to `construct_from_string`. This could
323
+ # cause a FutureWarning from numpy about failing elementwise
324
+ # comparison from, e.g., comparing DataFrame == 'category'.
325
+ return False
326
+ elif dtype is None:
327
+ return False
328
+ elif isinstance(dtype, cls):
329
+ return True
330
+ if isinstance(dtype, str):
331
+ try:
332
+ return cls.construct_from_string(dtype) is not None
333
+ except TypeError:
334
+ return False
335
+ return False
336
+
337
+ @property
338
+ def _is_numeric(self) -> bool:
339
+ """
340
+ Whether columns with this dtype should be considered numeric.
341
+
342
+ By default ExtensionDtypes are assumed to be non-numeric.
343
+ They'll be excluded from operations that exclude non-numeric
344
+ columns, like (groupby) reductions, plotting, etc.
345
+ """
346
+ return False
347
+
348
+ @property
349
+ def _is_boolean(self) -> bool:
350
+ """
351
+ Whether this dtype should be considered boolean.
352
+
353
+ By default, ExtensionDtypes are assumed to be non-numeric.
354
+ Setting this to True will affect the behavior of several places,
355
+ e.g.
356
+
357
+ * is_bool
358
+ * boolean indexing
359
+
360
+ Returns
361
+ -------
362
+ bool
363
+ """
364
+ return False
365
+
366
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
367
+ """
368
+ Return the common dtype, if one exists.
369
+
370
+ Used in `find_common_type` implementation. This is for example used
371
+ to determine the resulting dtype in a concat operation.
372
+
373
+ If no common dtype exists, return None (which gives the other dtypes
374
+ the chance to determine a common dtype). If all dtypes in the list
375
+ return None, then the common dtype will be "object" dtype (this means
376
+ it is never needed to return "object" dtype from this method itself).
377
+
378
+ Parameters
379
+ ----------
380
+ dtypes : list of dtypes
381
+ The dtypes for which to determine a common dtype. This is a list
382
+ of np.dtype or ExtensionDtype instances.
383
+
384
+ Returns
385
+ -------
386
+ Common dtype (np.dtype or ExtensionDtype) or None
387
+ """
388
+ if len(set(dtypes)) == 1:
389
+ # only itself
390
+ return self
391
+ else:
392
+ return None
393
+
394
+ @property
395
+ def _can_hold_na(self) -> bool:
396
+ """
397
+ Can arrays of this dtype hold NA values?
398
+ """
399
+ return True
400
+
401
+ @property
402
+ def _is_immutable(self) -> bool:
403
+ """
404
+ Can arrays with this dtype be modified with __setitem__? If not, return
405
+ True.
406
+
407
+ Immutable arrays are expected to raise TypeError on __setitem__ calls.
408
+ """
409
+ return False
410
+
411
+ @cache_readonly
412
+ def index_class(self) -> type_t[Index]:
413
+ """
414
+ The Index subclass to return from Index.__new__ when this dtype is
415
+ encountered.
416
+ """
417
+ from pandas import Index
418
+
419
+ return Index
420
+
421
+ @property
422
+ def _supports_2d(self) -> bool:
423
+ """
424
+ Do ExtensionArrays with this dtype support 2D arrays?
425
+
426
+ Historically ExtensionArrays were limited to 1D. By returning True here,
427
+ authors can indicate that their arrays support 2D instances. This can
428
+ improve performance in some cases, particularly operations with `axis=1`.
429
+
430
+ Arrays that support 2D values should:
431
+
432
+ - implement Array.reshape
433
+ - subclass the Dim2CompatTests in tests.extension.base
434
+ - _concat_same_type should support `axis` keyword
435
+ - _reduce and reductions should support `axis` keyword
436
+ """
437
+ return False
438
+
439
+ @property
440
+ def _can_fast_transpose(self) -> bool:
441
+ """
442
+ Is transposing an array with this dtype zero-copy?
443
+
444
+ Only relevant for cases where _supports_2d is True.
445
+ """
446
+ return False
447
+
448
+
449
+ class StorageExtensionDtype(ExtensionDtype):
450
+ """ExtensionDtype that may be backed by more than one implementation."""
451
+
452
+ name: str
453
+ _metadata = ("storage",)
454
+
455
+ def __init__(self, storage: str | None = None) -> None:
456
+ self.storage = storage
457
+
458
+ def __repr__(self) -> str:
459
+ return f"{self.name}[{self.storage}]"
460
+
461
+ def __str__(self) -> str:
462
+ return self.name
463
+
464
+ def __eq__(self, other: object) -> bool:
465
+ if isinstance(other, str) and other == self.name:
466
+ return True
467
+ return super().__eq__(other)
468
+
469
+ def __hash__(self) -> int:
470
+ # custom __eq__ so have to override __hash__
471
+ return super().__hash__()
472
+
473
+ @property
474
+ def na_value(self) -> libmissing.NAType:
475
+ return libmissing.NA
476
+
477
+
478
+ def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
479
+ """
480
+ Register an ExtensionType with pandas as class decorator.
481
+
482
+ This enables operations like ``.astype(name)`` for the name
483
+ of the ExtensionDtype.
484
+
485
+ Returns
486
+ -------
487
+ callable
488
+ A class decorator.
489
+
490
+ Examples
491
+ --------
492
+ >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype
493
+ >>> @register_extension_dtype
494
+ ... class MyExtensionDtype(ExtensionDtype):
495
+ ... name = "myextension"
496
+ """
497
+ _registry.register(cls)
498
+ return cls
499
+
500
+
501
+ class Registry:
502
+ """
503
+ Registry for dtype inference.
504
+
505
+ The registry allows one to map a string repr of a extension
506
+ dtype to an extension dtype. The string alias can be used in several
507
+ places, including
508
+
509
+ * Series and Index constructors
510
+ * :meth:`pandas.array`
511
+ * :meth:`pandas.Series.astype`
512
+
513
+ Multiple extension types can be registered.
514
+ These are tried in order.
515
+ """
516
+
517
+ def __init__(self) -> None:
518
+ self.dtypes: list[type_t[ExtensionDtype]] = []
519
+
520
+ def register(self, dtype: type_t[ExtensionDtype]) -> None:
521
+ """
522
+ Parameters
523
+ ----------
524
+ dtype : ExtensionDtype class
525
+ """
526
+ if not issubclass(dtype, ExtensionDtype):
527
+ raise ValueError("can only register pandas extension dtypes")
528
+
529
+ self.dtypes.append(dtype)
530
+
531
+ @overload
532
+ def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
533
+ ...
534
+
535
+ @overload
536
+ def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT:
537
+ ...
538
+
539
+ @overload
540
+ def find(self, dtype: str) -> ExtensionDtype | None:
541
+ ...
542
+
543
+ @overload
544
+ def find(
545
+ self, dtype: npt.DTypeLike
546
+ ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
547
+ ...
548
+
549
+ def find(
550
+ self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike
551
+ ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
552
+ """
553
+ Parameters
554
+ ----------
555
+ dtype : ExtensionDtype class or instance or str or numpy dtype or python type
556
+
557
+ Returns
558
+ -------
559
+ return the first matching dtype, otherwise return None
560
+ """
561
+ if not isinstance(dtype, str):
562
+ dtype_type: type_t
563
+ if not isinstance(dtype, type):
564
+ dtype_type = type(dtype)
565
+ else:
566
+ dtype_type = dtype
567
+ if issubclass(dtype_type, ExtensionDtype):
568
+ # cast needed here as mypy doesn't know we have figured
569
+ # out it is an ExtensionDtype or type_t[ExtensionDtype]
570
+ return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype)
571
+
572
+ return None
573
+
574
+ for dtype_type in self.dtypes:
575
+ try:
576
+ return dtype_type.construct_from_string(dtype)
577
+ except TypeError:
578
+ pass
579
+
580
+ return None
581
+
582
+
583
+ _registry = Registry()
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/common.py ADDED
@@ -0,0 +1,1748 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Common type operations.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ Callable,
10
+ )
11
+ import warnings
12
+
13
+ import numpy as np
14
+
15
+ from pandas._libs import (
16
+ Interval,
17
+ Period,
18
+ algos,
19
+ lib,
20
+ )
21
+ from pandas._libs.tslibs import conversion
22
+ from pandas.util._exceptions import find_stack_level
23
+
24
+ from pandas.core.dtypes.base import _registry as registry
25
+ from pandas.core.dtypes.dtypes import (
26
+ CategoricalDtype,
27
+ DatetimeTZDtype,
28
+ ExtensionDtype,
29
+ IntervalDtype,
30
+ PeriodDtype,
31
+ SparseDtype,
32
+ )
33
+ from pandas.core.dtypes.generic import ABCIndex
34
+ from pandas.core.dtypes.inference import (
35
+ is_array_like,
36
+ is_bool,
37
+ is_complex,
38
+ is_dataclass,
39
+ is_decimal,
40
+ is_dict_like,
41
+ is_file_like,
42
+ is_float,
43
+ is_hashable,
44
+ is_integer,
45
+ is_interval,
46
+ is_iterator,
47
+ is_list_like,
48
+ is_named_tuple,
49
+ is_nested_list_like,
50
+ is_number,
51
+ is_re,
52
+ is_re_compilable,
53
+ is_scalar,
54
+ is_sequence,
55
+ )
56
+
57
+ if TYPE_CHECKING:
58
+ from pandas._typing import (
59
+ ArrayLike,
60
+ DtypeObj,
61
+ )
62
+
63
# Canonical dtype singletons, re-exported so the rest of pandas can import
# them from a single place.
DT64NS_DTYPE = conversion.DT64NS_DTYPE
TD64NS_DTYPE = conversion.TD64NS_DTYPE
INT64_DTYPE = np.dtype(np.int64)

# Lazy cache for scipy's ``issparse`` (populated by ``is_scipy_sparse``);
# kept None here to avoid importing scipy at module-import time.
_is_scipy_sparse = None

# C-level casting helpers from pandas._libs.algos, re-exported by name.
ensure_float64 = algos.ensure_float64
ensure_int64 = algos.ensure_int64
ensure_int32 = algos.ensure_int32
ensure_int16 = algos.ensure_int16
ensure_int8 = algos.ensure_int8
ensure_platform_int = algos.ensure_platform_int
ensure_object = algos.ensure_object
ensure_uint64 = algos.ensure_uint64
78
+
79
+
80
+ def ensure_str(value: bytes | Any) -> str:
81
+ """
82
+ Ensure that bytes and non-strings get converted into ``str`` objects.
83
+ """
84
+ if isinstance(value, bytes):
85
+ value = value.decode("utf-8")
86
+ elif not isinstance(value, str):
87
+ value = str(value)
88
+ return value
89
+
90
+
91
+ def ensure_python_int(value: int | np.integer) -> int:
92
+ """
93
+ Ensure that a value is a python int.
94
+
95
+ Parameters
96
+ ----------
97
+ value: int or numpy.integer
98
+
99
+ Returns
100
+ -------
101
+ int
102
+
103
+ Raises
104
+ ------
105
+ TypeError: if the value isn't an int or can't be converted to one.
106
+ """
107
+ if not (is_integer(value) or is_float(value)):
108
+ if not is_scalar(value):
109
+ raise TypeError(
110
+ f"Value needs to be a scalar value, was type {type(value).__name__}"
111
+ )
112
+ raise TypeError(f"Wrong type {type(value)} for value {value}")
113
+ try:
114
+ new_value = int(value)
115
+ assert new_value == value
116
+ except (TypeError, ValueError, AssertionError) as err:
117
+ raise TypeError(f"Wrong type {type(value)} for value {value}") from err
118
+ return new_value
119
+
120
+
121
def classes(*klasses) -> Callable:
    """Evaluate if the tipo is a subclass of the klasses."""

    def _check(tipo) -> bool:
        return issubclass(tipo, klasses)

    return _check
124
+
125
+
126
def _classes_and_not_datetimelike(*klasses) -> Callable:
    """
    Evaluate if the tipo is a subclass of the klasses
    and not a datetimelike.
    """

    def _check(tipo) -> bool:
        if not issubclass(tipo, klasses):
            return False
        # Exclude np.datetime64/np.timedelta64 even when they match klasses
        # (e.g. both are subclasses of np.generic).
        return not issubclass(tipo, (np.datetime64, np.timedelta64))

    return _check
135
+
136
+
137
def is_object_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the object dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the object dtype.
    """
    is_object = classes(np.object_)
    return _is_dtype_type(arr_or_dtype, is_object)
166
+
167
+
168
+ def is_sparse(arr) -> bool:
169
+ """
170
+ Check whether an array-like is a 1-D pandas sparse array.
171
+
172
+ .. deprecated:: 2.1.0
173
+ Use isinstance(dtype, pd.SparseDtype) instead.
174
+
175
+ Check that the one-dimensional array-like is a pandas sparse array.
176
+ Returns True if it is a pandas sparse array, not another type of
177
+ sparse array.
178
+
179
+ Parameters
180
+ ----------
181
+ arr : array-like
182
+ Array-like to check.
183
+
184
+ Returns
185
+ -------
186
+ bool
187
+ Whether or not the array-like is a pandas sparse array.
188
+
189
+ Examples
190
+ --------
191
+ Returns `True` if the parameter is a 1-D pandas sparse array.
192
+
193
+ >>> from pandas.api.types import is_sparse
194
+ >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))
195
+ True
196
+ >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))
197
+ True
198
+
199
+ Returns `False` if the parameter is not sparse.
200
+
201
+ >>> is_sparse(np.array([0, 0, 1, 0]))
202
+ False
203
+ >>> is_sparse(pd.Series([0, 1, 0, 0]))
204
+ False
205
+
206
+ Returns `False` if the parameter is not a pandas sparse array.
207
+
208
+ >>> from scipy.sparse import bsr_matrix
209
+ >>> is_sparse(bsr_matrix([0, 1, 0, 0]))
210
+ False
211
+
212
+ Returns `False` if the parameter has more than one dimension.
213
+ """
214
+ warnings.warn(
215
+ "is_sparse is deprecated and will be removed in a future "
216
+ "version. Check `isinstance(dtype, pd.SparseDtype)` instead.",
217
+ DeprecationWarning,
218
+ stacklevel=2,
219
+ )
220
+
221
+ dtype = getattr(arr, "dtype", arr)
222
+ return isinstance(dtype, SparseDtype)
223
+
224
+
225
def is_scipy_sparse(arr) -> bool:
    """
    Check whether an array-like is a scipy.sparse.spmatrix instance.

    Parameters
    ----------
    arr : array-like
        The array-like to check.

    Returns
    -------
    boolean
        Whether or not the array-like is a scipy.sparse.spmatrix instance.

    Notes
    -----
    If scipy is not installed, this function will always return False.
    """
    global _is_scipy_sparse

    # Import scipy lazily on first call; cache the predicate (or a constant
    # False callable when scipy is unavailable) in the module-level global.
    if _is_scipy_sparse is None:  # pylint: disable=used-before-assignment
        try:
            from scipy.sparse import issparse as _is_scipy_sparse
        except ImportError:
            _is_scipy_sparse = lambda _: False

    assert _is_scipy_sparse is not None
    return _is_scipy_sparse(arr)
261
+
262
+
263
def is_datetime64_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the datetime64 dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the datetime64 dtype.
    """
    # GH#33400 fastpath for dtype object
    if isinstance(arr_or_dtype, np.dtype):
        return arr_or_dtype.kind == "M"
    return _is_dtype_type(arr_or_dtype, classes(np.datetime64))
295
+
296
+
297
def is_datetime64tz_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of a DatetimeTZDtype dtype.

    .. deprecated:: 2.1.0
        Use isinstance(dtype, pd.DatetimeTZDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.
    """
    # GH#52607
    warnings.warn(
        "is_datetime64tz_dtype is deprecated and will be removed in a future "
        "version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    # GH#33400 / GH#34986: the dtype object itself is trivially tz-aware.
    if isinstance(arr_or_dtype, DatetimeTZDtype):
        return True
    if arr_or_dtype is None:
        return False
    return DatetimeTZDtype.is_dtype(arr_or_dtype)
349
+
350
+
351
def is_timedelta64_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the timedelta64 dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the timedelta64 dtype.
    """
    # GH#33400 fastpath for dtype object
    if isinstance(arr_or_dtype, np.dtype):
        return arr_or_dtype.kind == "m"
    return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))
384
+
385
+
386
def is_period_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the Period dtype.

    .. deprecated:: 2.2.0
        Use isinstance(dtype, pd.PeriodDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the Period dtype.
    """
    warnings.warn(
        "is_period_dtype is deprecated and will be removed in a future version. "
        "Use `isinstance(dtype, pd.PeriodDtype)` instead",
        DeprecationWarning,
        stacklevel=2,
    )
    # GH#33400 fastpath for dtype object
    if isinstance(arr_or_dtype, ExtensionDtype):
        return arr_or_dtype.type is Period
    if arr_or_dtype is None:
        return False
    return PeriodDtype.is_dtype(arr_or_dtype)
430
+
431
+
432
def is_interval_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the Interval dtype.

    .. deprecated:: 2.2.0
        Use isinstance(dtype, pd.IntervalDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the Interval dtype.
    """
    # GH#52607
    warnings.warn(
        "is_interval_dtype is deprecated and will be removed in a future version. "
        "Use `isinstance(dtype, pd.IntervalDtype)` instead",
        DeprecationWarning,
        stacklevel=2,
    )
    # GH#33400 fastpath for dtype object
    if isinstance(arr_or_dtype, ExtensionDtype):
        return arr_or_dtype.type is Interval
    if arr_or_dtype is None:
        return False
    return IntervalDtype.is_dtype(arr_or_dtype)
479
+
480
+
481
def is_categorical_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the Categorical dtype.

    .. deprecated:: 2.2.0
        Use isinstance(dtype, pd.CategoricalDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the Categorical dtype.
    """
    # GH#52527
    warnings.warn(
        "is_categorical_dtype is deprecated and will be removed in a future "
        "version. Use isinstance(dtype, pd.CategoricalDtype) instead",
        DeprecationWarning,
        stacklevel=2,
    )
    # GH#33400 fastpath for dtype object
    if isinstance(arr_or_dtype, ExtensionDtype):
        return arr_or_dtype.name == "category"
    if arr_or_dtype is None:
        return False
    return CategoricalDtype.is_dtype(arr_or_dtype)
527
+
528
+
529
def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
    """
    Faster alternative to is_string_dtype, assumes we have a np.dtype object.
    """
    if dtype == object:
        return True
    # "S" = bytestring, "U" = unicode string
    return dtype.kind in "SU"
534
+
535
+
536
+ def is_string_dtype(arr_or_dtype) -> bool:
537
+ """
538
+ Check whether the provided array or dtype is of the string dtype.
539
+
540
+ If an array is passed with an object dtype, the elements must be
541
+ inferred as strings.
542
+
543
+ Parameters
544
+ ----------
545
+ arr_or_dtype : array-like or dtype
546
+ The array or dtype to check.
547
+
548
+ Returns
549
+ -------
550
+ boolean
551
+ Whether or not the array or dtype is of the string dtype.
552
+
553
+ Examples
554
+ --------
555
+ >>> from pandas.api.types import is_string_dtype
556
+ >>> is_string_dtype(str)
557
+ True
558
+ >>> is_string_dtype(object)
559
+ True
560
+ >>> is_string_dtype(int)
561
+ False
562
+ >>> is_string_dtype(np.array(['a', 'b']))
563
+ True
564
+ >>> is_string_dtype(pd.Series([1, 2]))
565
+ False
566
+ >>> is_string_dtype(pd.Series([1, 2], dtype=object))
567
+ False
568
+ """
569
+ if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O":
570
+ return is_all_strings(arr_or_dtype)
571
+
572
+ def condition(dtype) -> bool:
573
+ if is_string_or_object_np_dtype(dtype):
574
+ return True
575
+ try:
576
+ return dtype == "string"
577
+ except TypeError:
578
+ return False
579
+
580
+ return _is_dtype(arr_or_dtype, condition)
581
+
582
+
583
+ def is_dtype_equal(source, target) -> bool:
584
+ """
585
+ Check if two dtypes are equal.
586
+
587
+ Parameters
588
+ ----------
589
+ source : The first dtype to compare
590
+ target : The second dtype to compare
591
+
592
+ Returns
593
+ -------
594
+ boolean
595
+ Whether or not the two dtypes are equal.
596
+
597
+ Examples
598
+ --------
599
+ >>> is_dtype_equal(int, float)
600
+ False
601
+ >>> is_dtype_equal("int", int)
602
+ True
603
+ >>> is_dtype_equal(object, "category")
604
+ False
605
+ >>> is_dtype_equal(CategoricalDtype(), "category")
606
+ True
607
+ >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64")
608
+ False
609
+ """
610
+ if isinstance(target, str):
611
+ if not isinstance(source, str):
612
+ # GH#38516 ensure we get the same behavior from
613
+ # is_dtype_equal(CDT, "category") and CDT == "category"
614
+ try:
615
+ src = _get_dtype(source)
616
+ if isinstance(src, ExtensionDtype):
617
+ return src == target
618
+ except (TypeError, AttributeError, ImportError):
619
+ return False
620
+ elif isinstance(source, str):
621
+ return is_dtype_equal(target, source)
622
+
623
+ try:
624
+ source = _get_dtype(source)
625
+ target = _get_dtype(target)
626
+ return source == target
627
+ except (TypeError, AttributeError, ImportError):
628
+ # invalid comparison
629
+ # object == category will hit this
630
+ return False
631
+
632
+
633
+ def is_integer_dtype(arr_or_dtype) -> bool:
634
+ """
635
+ Check whether the provided array or dtype is of an integer dtype.
636
+
637
+ Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
638
+
639
+ The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
640
+ as integer by this function.
641
+
642
+ Parameters
643
+ ----------
644
+ arr_or_dtype : array-like or dtype
645
+ The array or dtype to check.
646
+
647
+ Returns
648
+ -------
649
+ boolean
650
+ Whether or not the array or dtype is of an integer dtype and
651
+ not an instance of timedelta64.
652
+
653
+ Examples
654
+ --------
655
+ >>> from pandas.api.types import is_integer_dtype
656
+ >>> is_integer_dtype(str)
657
+ False
658
+ >>> is_integer_dtype(int)
659
+ True
660
+ >>> is_integer_dtype(float)
661
+ False
662
+ >>> is_integer_dtype(np.uint64)
663
+ True
664
+ >>> is_integer_dtype('int8')
665
+ True
666
+ >>> is_integer_dtype('Int8')
667
+ True
668
+ >>> is_integer_dtype(pd.Int8Dtype)
669
+ True
670
+ >>> is_integer_dtype(np.datetime64)
671
+ False
672
+ >>> is_integer_dtype(np.timedelta64)
673
+ False
674
+ >>> is_integer_dtype(np.array(['a', 'b']))
675
+ False
676
+ >>> is_integer_dtype(pd.Series([1, 2]))
677
+ True
678
+ >>> is_integer_dtype(np.array([], dtype=np.timedelta64))
679
+ False
680
+ >>> is_integer_dtype(pd.Index([1, 2.])) # float
681
+ False
682
+ """
683
+ return _is_dtype_type(
684
+ arr_or_dtype, _classes_and_not_datetimelike(np.integer)
685
+ ) or _is_dtype(
686
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu"
687
+ )
688
+
689
+
690
+ def is_signed_integer_dtype(arr_or_dtype) -> bool:
691
+ """
692
+ Check whether the provided array or dtype is of a signed integer dtype.
693
+
694
+ Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
695
+
696
+ The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
697
+ as integer by this function.
698
+
699
+ Parameters
700
+ ----------
701
+ arr_or_dtype : array-like or dtype
702
+ The array or dtype to check.
703
+
704
+ Returns
705
+ -------
706
+ boolean
707
+ Whether or not the array or dtype is of a signed integer dtype
708
+ and not an instance of timedelta64.
709
+
710
+ Examples
711
+ --------
712
+ >>> from pandas.core.dtypes.common import is_signed_integer_dtype
713
+ >>> is_signed_integer_dtype(str)
714
+ False
715
+ >>> is_signed_integer_dtype(int)
716
+ True
717
+ >>> is_signed_integer_dtype(float)
718
+ False
719
+ >>> is_signed_integer_dtype(np.uint64) # unsigned
720
+ False
721
+ >>> is_signed_integer_dtype('int8')
722
+ True
723
+ >>> is_signed_integer_dtype('Int8')
724
+ True
725
+ >>> is_signed_integer_dtype(pd.Int8Dtype)
726
+ True
727
+ >>> is_signed_integer_dtype(np.datetime64)
728
+ False
729
+ >>> is_signed_integer_dtype(np.timedelta64)
730
+ False
731
+ >>> is_signed_integer_dtype(np.array(['a', 'b']))
732
+ False
733
+ >>> is_signed_integer_dtype(pd.Series([1, 2]))
734
+ True
735
+ >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
736
+ False
737
+ >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float
738
+ False
739
+ >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
740
+ False
741
+ """
742
+ return _is_dtype_type(
743
+ arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger)
744
+ ) or _is_dtype(
745
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i"
746
+ )
747
+
748
+
749
+ def is_unsigned_integer_dtype(arr_or_dtype) -> bool:
750
+ """
751
+ Check whether the provided array or dtype is of an unsigned integer dtype.
752
+
753
+ The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also
754
+ considered as integer by this function.
755
+
756
+ Parameters
757
+ ----------
758
+ arr_or_dtype : array-like or dtype
759
+ The array or dtype to check.
760
+
761
+ Returns
762
+ -------
763
+ boolean
764
+ Whether or not the array or dtype is of an unsigned integer dtype.
765
+
766
+ Examples
767
+ --------
768
+ >>> from pandas.api.types import is_unsigned_integer_dtype
769
+ >>> is_unsigned_integer_dtype(str)
770
+ False
771
+ >>> is_unsigned_integer_dtype(int) # signed
772
+ False
773
+ >>> is_unsigned_integer_dtype(float)
774
+ False
775
+ >>> is_unsigned_integer_dtype(np.uint64)
776
+ True
777
+ >>> is_unsigned_integer_dtype('uint8')
778
+ True
779
+ >>> is_unsigned_integer_dtype('UInt8')
780
+ True
781
+ >>> is_unsigned_integer_dtype(pd.UInt8Dtype)
782
+ True
783
+ >>> is_unsigned_integer_dtype(np.array(['a', 'b']))
784
+ False
785
+ >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed
786
+ False
787
+ >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float
788
+ False
789
+ >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
790
+ True
791
+ """
792
+ return _is_dtype_type(
793
+ arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger)
794
+ ) or _is_dtype(
795
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u"
796
+ )
797
+
798
+
799
def is_int64_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the int64 dtype.

    .. deprecated:: 2.1.0

        is_int64_dtype is deprecated and will be removed in a future
        version. Use dtype == np.int64 instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the int64 dtype.

    Notes
    -----
    Depending on system architecture, the return value of
    `is_int64_dtype(int)` differs: True where the platform's native int
    is 64-bit, False where it is 32-bit.
    """
    # GH#52564
    warnings.warn(
        "is_int64_dtype is deprecated and will be removed in a future "
        "version. Use dtype == np.int64 instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _is_dtype_type(arr_or_dtype, classes(np.int64))
860
+
861
+
862
+ def is_datetime64_any_dtype(arr_or_dtype) -> bool:
863
+ """
864
+ Check whether the provided array or dtype is of the datetime64 dtype.
865
+
866
+ Parameters
867
+ ----------
868
+ arr_or_dtype : array-like or dtype
869
+ The array or dtype to check.
870
+
871
+ Returns
872
+ -------
873
+ bool
874
+ Whether or not the array or dtype is of the datetime64 dtype.
875
+
876
+ Examples
877
+ --------
878
+ >>> from pandas.api.types import is_datetime64_any_dtype
879
+ >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
880
+ >>> is_datetime64_any_dtype(str)
881
+ False
882
+ >>> is_datetime64_any_dtype(int)
883
+ False
884
+ >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive
885
+ True
886
+ >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
887
+ True
888
+ >>> is_datetime64_any_dtype(np.array(['a', 'b']))
889
+ False
890
+ >>> is_datetime64_any_dtype(np.array([1, 2]))
891
+ False
892
+ >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))
893
+ True
894
+ >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
895
+ True
896
+ """
897
+ if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)):
898
+ # GH#33400 fastpath for dtype object
899
+ return arr_or_dtype.kind == "M"
900
+
901
+ if arr_or_dtype is None:
902
+ return False
903
+
904
+ try:
905
+ tipo = _get_dtype(arr_or_dtype)
906
+ except TypeError:
907
+ return False
908
+ return lib.is_np_dtype(tipo, "M") or isinstance(tipo, DatetimeTZDtype)
909
+
910
+
911
+ def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
912
+ """
913
+ Check whether the provided array or dtype is of the datetime64[ns] dtype.
914
+
915
+ Parameters
916
+ ----------
917
+ arr_or_dtype : array-like or dtype
918
+ The array or dtype to check.
919
+
920
+ Returns
921
+ -------
922
+ bool
923
+ Whether or not the array or dtype is of the datetime64[ns] dtype.
924
+
925
+ Examples
926
+ --------
927
+ >>> from pandas.api.types import is_datetime64_ns_dtype
928
+ >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
929
+ >>> is_datetime64_ns_dtype(str)
930
+ False
931
+ >>> is_datetime64_ns_dtype(int)
932
+ False
933
+ >>> is_datetime64_ns_dtype(np.datetime64) # no unit
934
+ False
935
+ >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
936
+ True
937
+ >>> is_datetime64_ns_dtype(np.array(['a', 'b']))
938
+ False
939
+ >>> is_datetime64_ns_dtype(np.array([1, 2]))
940
+ False
941
+ >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit
942
+ False
943
+ >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit
944
+ False
945
+ >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
946
+ True
947
+ """
948
+ if arr_or_dtype is None:
949
+ return False
950
+ try:
951
+ tipo = _get_dtype(arr_or_dtype)
952
+ except TypeError:
953
+ return False
954
+ return tipo == DT64NS_DTYPE or (
955
+ isinstance(tipo, DatetimeTZDtype) and tipo.unit == "ns"
956
+ )
957
+
958
+
959
def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the timedelta64[ns] dtype.

    This is a very specific dtype, so generic ones like `np.timedelta64`
    will return False if passed into this function.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the timedelta64[ns] dtype.
    """

    def _is_td64ns(dtype) -> bool:
        return dtype == TD64NS_DTYPE

    return _is_dtype(arr_or_dtype, _is_td64ns)
989
+
990
+
991
# This exists to silence numpy deprecation warnings, see GH#29553
def is_numeric_v_string_like(a: ArrayLike, b) -> bool:
    """
    Check if we are comparing a string-like object to a numeric ndarray.
    NumPy doesn't like to compare such objects, especially numeric arrays
    and scalar string-likes.

    Parameters
    ----------
    a : array-like, scalar
        The first object to check.
    b : array-like, scalar
        The second object to check.

    Returns
    -------
    boolean
        Whether we return a comparing a string-like object to a numeric array.
    """
    a_is_array = isinstance(a, np.ndarray)
    b_is_array = isinstance(b, np.ndarray)

    a_numeric = a_is_array and a.dtype.kind in ("u", "i", "f", "c", "b")
    b_numeric = b_is_array and b.dtype.kind in ("u", "i", "f", "c", "b")
    a_string = a_is_array and a.dtype.kind in ("S", "U")
    b_string = b_is_array and b.dtype.kind in ("S", "U")
    b_scalar_string = (not b_is_array) and isinstance(b, str)

    if a_numeric:
        # numeric array vs string scalar or string array
        return b_scalar_string or b_string
    # string array vs numeric array (the mirrored case)
    return b_numeric and a_string
1038
+
1039
+
1040
+ def needs_i8_conversion(dtype: DtypeObj | None) -> bool:
1041
+ """
1042
+ Check whether the dtype should be converted to int64.
1043
+
1044
+ Dtype "needs" such a conversion if the dtype is of a datetime-like dtype
1045
+
1046
+ Parameters
1047
+ ----------
1048
+ dtype : np.dtype, ExtensionDtype, or None
1049
+
1050
+ Returns
1051
+ -------
1052
+ boolean
1053
+ Whether or not the dtype should be converted to int64.
1054
+
1055
+ Examples
1056
+ --------
1057
+ >>> needs_i8_conversion(str)
1058
+ False
1059
+ >>> needs_i8_conversion(np.int64)
1060
+ False
1061
+ >>> needs_i8_conversion(np.datetime64)
1062
+ False
1063
+ >>> needs_i8_conversion(np.dtype(np.datetime64))
1064
+ True
1065
+ >>> needs_i8_conversion(np.array(['a', 'b']))
1066
+ False
1067
+ >>> needs_i8_conversion(pd.Series([1, 2]))
1068
+ False
1069
+ >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
1070
+ False
1071
+ >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
1072
+ False
1073
+ >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype)
1074
+ True
1075
+ """
1076
+ if isinstance(dtype, np.dtype):
1077
+ return dtype.kind in "mM"
1078
+ return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))
1079
+
1080
+
1081
+ def is_numeric_dtype(arr_or_dtype) -> bool:
1082
+ """
1083
+ Check whether the provided array or dtype is of a numeric dtype.
1084
+
1085
+ Parameters
1086
+ ----------
1087
+ arr_or_dtype : array-like or dtype
1088
+ The array or dtype to check.
1089
+
1090
+ Returns
1091
+ -------
1092
+ boolean
1093
+ Whether or not the array or dtype is of a numeric dtype.
1094
+
1095
+ Examples
1096
+ --------
1097
+ >>> from pandas.api.types import is_numeric_dtype
1098
+ >>> is_numeric_dtype(str)
1099
+ False
1100
+ >>> is_numeric_dtype(int)
1101
+ True
1102
+ >>> is_numeric_dtype(float)
1103
+ True
1104
+ >>> is_numeric_dtype(np.uint64)
1105
+ True
1106
+ >>> is_numeric_dtype(np.datetime64)
1107
+ False
1108
+ >>> is_numeric_dtype(np.timedelta64)
1109
+ False
1110
+ >>> is_numeric_dtype(np.array(['a', 'b']))
1111
+ False
1112
+ >>> is_numeric_dtype(pd.Series([1, 2]))
1113
+ True
1114
+ >>> is_numeric_dtype(pd.Index([1, 2.]))
1115
+ True
1116
+ >>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
1117
+ False
1118
+ """
1119
+ return _is_dtype_type(
1120
+ arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_)
1121
+ ) or _is_dtype(
1122
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric
1123
+ )
1124
+
1125
+
1126
def is_any_real_numeric_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a real number dtype.

    Real means numeric but neither complex nor boolean.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a real number dtype.
    """
    if not is_numeric_dtype(arr_or_dtype):
        return False
    # numeric, but exclude the non-real numeric kinds
    return not (is_complex_dtype(arr_or_dtype) or is_bool_dtype(arr_or_dtype))
1161
+
1162
+
1163
def is_float_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a float dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a float dtype.
    """

    def _ea_is_float(typ) -> bool:
        # extension dtypes advertise float-ness via their kind code
        return isinstance(typ, ExtensionDtype) and typ.kind in "f"

    if _is_dtype_type(arr_or_dtype, classes(np.floating)):
        return True
    return _is_dtype(arr_or_dtype, _ea_is_float)
1196
+
1197
+
1198
def is_bool_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a boolean dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a boolean dtype.

    Notes
    -----
    An ExtensionArray is considered boolean when the ``_is_boolean``
    attribute is set to True.

    Examples
    --------
    >>> from pandas.api.types import is_bool_dtype
    >>> is_bool_dtype(bool)
    True
    >>> is_bool_dtype(np.bool_)
    True
    >>> is_bool_dtype(np.array(['a', 'b']))
    False
    >>> is_bool_dtype(np.array([True, False]))
    True
    >>> is_bool_dtype(pd.Categorical([True, False]))
    True
    """
    # None carries no dtype information at all
    if arr_or_dtype is None:
        return False
    try:
        dtype = _get_dtype(arr_or_dtype)
    except (TypeError, ValueError):
        # not a dtype and not coercible to one
        return False

    if isinstance(dtype, CategoricalDtype):
        # a Categorical is boolean iff its categories are; re-check the
        # categories Index through the Index branch below
        arr_or_dtype = dtype.categories
        # now we use the special definition for Index

    if isinstance(arr_or_dtype, ABCIndex):
        # Allow Index[object] that is all-bools or Index["boolean"]
        if arr_or_dtype.inferred_type == "boolean":
            if not is_bool_dtype(arr_or_dtype.dtype):
                # GH#52680: accepting an object-dtype Index of bool objects
                # is deprecated; still returns True for now
                warnings.warn(
                    "The behavior of is_bool_dtype with an object-dtype Index "
                    "of bool objects is deprecated. In a future version, "
                    "this will return False. Cast the Index to a bool dtype instead.",
                    DeprecationWarning,
                    stacklevel=2,
                )
            return True
        return False
    elif isinstance(dtype, ExtensionDtype):
        # extension dtypes opt in through the _is_boolean flag
        return getattr(dtype, "_is_boolean", False)

    # plain numpy dtype: boolean iff the scalar type subclasses np.bool_
    return issubclass(dtype.type, np.bool_)
1268
+
1269
+
1270
+ def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool:
1271
+ """
1272
+ Analogue to is_extension_array_dtype but excluding DatetimeTZDtype.
1273
+ """
1274
+ return isinstance(dtype, ExtensionDtype) and not dtype._supports_2d
1275
+
1276
+
1277
+ def is_extension_array_dtype(arr_or_dtype) -> bool:
1278
+ """
1279
+ Check if an object is a pandas extension array type.
1280
+
1281
+ See the :ref:`Use Guide <extending.extension-types>` for more.
1282
+
1283
+ Parameters
1284
+ ----------
1285
+ arr_or_dtype : object
1286
+ For array-like input, the ``.dtype`` attribute will
1287
+ be extracted.
1288
+
1289
+ Returns
1290
+ -------
1291
+ bool
1292
+ Whether the `arr_or_dtype` is an extension array type.
1293
+
1294
+ Notes
1295
+ -----
1296
+ This checks whether an object implements the pandas extension
1297
+ array interface. In pandas, this includes:
1298
+
1299
+ * Categorical
1300
+ * Sparse
1301
+ * Interval
1302
+ * Period
1303
+ * DatetimeArray
1304
+ * TimedeltaArray
1305
+
1306
+ Third-party libraries may implement arrays or types satisfying
1307
+ this interface as well.
1308
+
1309
+ Examples
1310
+ --------
1311
+ >>> from pandas.api.types import is_extension_array_dtype
1312
+ >>> arr = pd.Categorical(['a', 'b'])
1313
+ >>> is_extension_array_dtype(arr)
1314
+ True
1315
+ >>> is_extension_array_dtype(arr.dtype)
1316
+ True
1317
+
1318
+ >>> arr = np.array(['a', 'b'])
1319
+ >>> is_extension_array_dtype(arr.dtype)
1320
+ False
1321
+ """
1322
+ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
1323
+ if isinstance(dtype, ExtensionDtype):
1324
+ return True
1325
+ elif isinstance(dtype, np.dtype):
1326
+ return False
1327
+ else:
1328
+ return registry.find(dtype) is not None
1329
+
1330
+
1331
+ def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool:
1332
+ """
1333
+ Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype.
1334
+
1335
+ Notes
1336
+ -----
1337
+ Checks only for dtype objects, not dtype-castable strings or types.
1338
+ """
1339
+ return isinstance(dtype, ExtensionDtype) or (lib.is_np_dtype(dtype, "mM"))
1340
+
1341
+
1342
def is_complex_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a complex dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a complex dtype.
    """
    # match any numpy complex-floating scalar type
    complex_check = classes(np.complexfloating)
    return _is_dtype_type(arr_or_dtype, complex_check)
1373
+
1374
+
1375
+ def _is_dtype(arr_or_dtype, condition) -> bool:
1376
+ """
1377
+ Return true if the condition is satisfied for the arr_or_dtype.
1378
+
1379
+ Parameters
1380
+ ----------
1381
+ arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType
1382
+ The array-like or dtype object whose dtype we want to extract.
1383
+ condition : callable[Union[np.dtype, ExtensionDtype]]
1384
+
1385
+ Returns
1386
+ -------
1387
+ bool
1388
+
1389
+ """
1390
+ if arr_or_dtype is None:
1391
+ return False
1392
+ try:
1393
+ dtype = _get_dtype(arr_or_dtype)
1394
+ except (TypeError, ValueError):
1395
+ return False
1396
+ return condition(dtype)
1397
+
1398
+
1399
def _get_dtype(arr_or_dtype) -> DtypeObj:
    """
    Extract the dtype instance associated with an array or dtype object.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype object whose dtype we want to extract.

    Returns
    -------
    obj_dtype : the dtype instance extracted from the passed-in array
        or dtype object.

    Raises
    ------
    TypeError : The passed in object is None.
    """
    if arr_or_dtype is None:
        raise TypeError("Cannot deduce dtype from null object")

    # fastpaths: already a resolved numpy dtype, or a plain type object
    if isinstance(arr_or_dtype, np.dtype):
        return arr_or_dtype
    if isinstance(arr_or_dtype, type):
        return np.dtype(arr_or_dtype)

    # array-likes contribute their dtype attribute; anything else is
    # handed to pandas_dtype directly
    candidate = getattr(arr_or_dtype, "dtype", arr_or_dtype)
    return pandas_dtype(candidate)
1432
+
1433
+
1434
def _is_dtype_type(arr_or_dtype, condition) -> bool:
    """
    Apply ``condition`` to the numpy-style scalar type of ``arr_or_dtype``.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype object whose type we want to extract.
    condition : callable[Union[np.dtype, ExtensionDtypeType]]

    Returns
    -------
    bool : if the condition is satisfied for the arr_or_dtype
    """
    # None has no type to speak of; let the condition decide on NoneType
    if arr_or_dtype is None:
        return condition(type(None))

    # fastpaths: a resolved numpy dtype, or a bare type object
    if isinstance(arr_or_dtype, np.dtype):
        return condition(arr_or_dtype.type)
    if isinstance(arr_or_dtype, type):
        if issubclass(arr_or_dtype, ExtensionDtype):
            # an ExtensionDtype class stands in for its scalar type
            arr_or_dtype = arr_or_dtype.type
        return condition(np.dtype(arr_or_dtype).type)

    # array-likes expose the dtype we care about; other list-likes
    # cannot possibly be a dtype
    if hasattr(arr_or_dtype, "dtype"):
        arr_or_dtype = arr_or_dtype.dtype
    elif is_list_like(arr_or_dtype):
        return condition(type(None))

    try:
        scalar_type = pandas_dtype(arr_or_dtype).type
    except (TypeError, ValueError):
        # scalars that are not dtypes evaluate against NoneType
        if is_scalar(arr_or_dtype):
            return condition(type(None))
        return False

    return condition(scalar_type)
1476
+
1477
+
1478
def infer_dtype_from_object(dtype) -> type:
    """
    Get a numpy dtype.type-style object for a dtype object.

    This methods also includes handling of the datetime64[ns] and
    datetime64[ns, TZ] objects.

    If no dtype can be found, we return ``object``.

    Parameters
    ----------
    dtype : dtype, type
        The dtype object whose numpy dtype.type-style
        object we want to extract.

    Returns
    -------
    type
    """
    if isinstance(dtype, type) and issubclass(dtype, np.generic):
        # Type object from a dtype -- already in the desired form

        return dtype
    elif isinstance(dtype, (np.dtype, ExtensionDtype)):
        # dtype object
        try:
            _validate_date_like_dtype(dtype)
        except TypeError:
            # Should still pass if we don't have a date-like
            pass
        if hasattr(dtype, "numpy_dtype"):
            # masked/numpy-backed extension dtypes map to their numpy type
            # TODO: Implement this properly
            # https://github.com/pandas-dev/pandas/issues/52576
            return dtype.numpy_dtype.type
        return dtype.type

    # try to coerce strings / misc objects into a real dtype first
    try:
        dtype = pandas_dtype(dtype)
    except TypeError:
        pass

    if isinstance(dtype, ExtensionDtype):
        return dtype.type
    elif isinstance(dtype, str):
        # TODO(jreback)
        # should deprecate these
        if dtype in ["datetimetz", "datetime64tz"]:
            return DatetimeTZDtype.type
        elif dtype in ["period"]:
            # a bare "period" string has no frequency and cannot be resolved
            raise NotImplementedError

        if dtype in ["datetime", "timedelta"]:
            # normalize the short aliases to the numpy names
            dtype += "64"
        try:
            # recurse on the numpy attribute of the same name
            return infer_dtype_from_object(getattr(np, dtype))
        except (AttributeError, TypeError):
            # Handles cases like _get_dtype(int) i.e.,
            # Python objects that are valid dtypes
            # (unlike user-defined types, in general)
            #
            # TypeError handles the float16 type code of 'e'
            # further handle internal types
            pass

    # last resort: let numpy resolve it, then recurse on the result
    return infer_dtype_from_object(np.dtype(dtype))
1543
+
1544
+
1545
def _validate_date_like_dtype(dtype) -> None:
    """
    Check whether the dtype is a date-like dtype. Raises an error if invalid.

    Parameters
    ----------
    dtype : dtype, type
        The dtype to check.

    Raises
    ------
    TypeError : The dtype could not be casted to a date-like dtype.
    ValueError : The dtype is an illegal date-like dtype (e.g. the
        frequency provided is too specific)
    """
    try:
        unit = np.datetime_data(dtype)[0]
    except ValueError as err:
        # not a datetime64/timedelta64 dtype at all
        raise TypeError(err) from err
    # only the generic and nanosecond resolutions are accepted here
    if unit not in ["generic", "ns"]:
        raise ValueError(
            f"{repr(dtype.name)} is too specific of a frequency, "
            f"try passing {repr(dtype.type.__name__)}"
        )
1569
+
1570
+
1571
def validate_all_hashable(*args, error_name: str | None = None) -> None:
    """
    Return None if all args are hashable, else raise a TypeError.

    Parameters
    ----------
    *args
        Arguments to validate.
    error_name : str, optional
        The name to use if error

    Raises
    ------
    TypeError : If an argument is not hashable

    Returns
    -------
    None
    """
    for arg in args:
        if not is_hashable(arg):
            # prefer the caller-supplied name in the message when given
            if error_name:
                raise TypeError(f"{error_name} must be a hashable type")
            raise TypeError("All elements must be hashable")
1594
+
1595
+
1596
def pandas_dtype(dtype) -> DtypeObj:
    """
    Convert input into a pandas only dtype object or a numpy dtype object.

    Parameters
    ----------
    dtype : object to be converted

    Returns
    -------
    np.dtype or a pandas dtype

    Raises
    ------
    TypeError if not a dtype

    Examples
    --------
    >>> pd.api.types.pandas_dtype(int)
    dtype('int64')
    """
    # short-circuit: arrays contribute their dtype; resolved dtypes pass through
    if isinstance(dtype, np.ndarray):
        return dtype.dtype
    elif isinstance(dtype, (np.dtype, ExtensionDtype)):
        return dtype

    # registered extension types (string aliases like "category")
    result = registry.find(dtype)
    if result is not None:
        if isinstance(result, type):
            # GH 31356, GH 54592: passing the dtype *class* rather than an
            # instance is deprecated-ish; warn but instantiate anyway
            warnings.warn(
                f"Instantiating {result.__name__} without any arguments."
                f"Pass a {result.__name__} instance to silence this warning.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
            result = result()
        return result

    # try a numpy dtype
    # raise a consistent TypeError if failed
    try:
        with warnings.catch_warnings():
            # GH#51523 - Series.astype(np.integer) doesn't show
            # numpy deprecation warning of np.integer
            # Hence enabling DeprecationWarning
            warnings.simplefilter("always", DeprecationWarning)
            npdtype = np.dtype(dtype)
    except SyntaxError as err:
        # np.dtype uses `eval` which can raise SyntaxError
        raise TypeError(f"data type '{dtype}' not understood") from err

    # Any invalid dtype (such as pd.Timestamp) should raise an error.
    # np.dtype(invalid_type).kind = 0 for such objects. However, this will
    # also catch some valid dtypes such as object, np.object_ and 'object'
    # which we safeguard against by catching them earlier and returning
    # np.dtype(valid_dtype) before this condition is evaluated.
    if is_hashable(dtype) and dtype in [
        object,
        np.object_,
        "object",
        "O",
        "object_",
    ]:
        # check hashability to avoid errors/DeprecationWarning when we get
        # here and `dtype` is an array
        return npdtype
    elif npdtype.kind == "O":
        # numpy silently coerced an unknown input to object; reject it
        raise TypeError(f"dtype '{dtype}' not understood")

    return npdtype
1669
+
1670
+
1671
def is_all_strings(value: ArrayLike) -> bool:
    """
    Check if this is an array of strings that we should try parsing.

    Includes object-dtype ndarray containing all-strings, StringArray,
    and Categorical with all-string categories.
    Does not include numpy string dtypes.
    """
    dtype = value.dtype

    if isinstance(dtype, np.dtype):
        # only object dtype can hold python strings here
        if dtype != np.dtype("object"):
            return False
        if len(value) == 0:
            # empty object array vacuously counts as all-strings
            return True
        return lib.is_string_array(np.asarray(value), skipna=False)
    if isinstance(dtype, CategoricalDtype):
        # Categorical: check the categories, not the codes
        return dtype.categories.inferred_type == "string"
    # remaining extension dtypes: only StringDtype matches
    return dtype == "string"
1691
+
1692
+
1693
# Public (and pandas-internal) names exported by this module.
__all__ = [
    "classes",
    "DT64NS_DTYPE",
    "ensure_float64",
    "ensure_python_int",
    "ensure_str",
    "infer_dtype_from_object",
    "INT64_DTYPE",
    "is_1d_only_ea_dtype",
    "is_all_strings",
    "is_any_real_numeric_dtype",
    "is_array_like",
    "is_bool",
    "is_bool_dtype",
    "is_categorical_dtype",
    "is_complex",
    "is_complex_dtype",
    "is_dataclass",
    "is_datetime64_any_dtype",
    "is_datetime64_dtype",
    "is_datetime64_ns_dtype",
    "is_datetime64tz_dtype",
    "is_decimal",
    "is_dict_like",
    "is_dtype_equal",
    "is_ea_or_datetimelike_dtype",
    "is_extension_array_dtype",
    "is_file_like",
    "is_float_dtype",
    "is_int64_dtype",
    "is_integer_dtype",
    "is_interval",
    "is_interval_dtype",
    "is_iterator",
    "is_named_tuple",
    "is_nested_list_like",
    "is_number",
    "is_numeric_dtype",
    "is_object_dtype",
    "is_period_dtype",
    "is_re",
    "is_re_compilable",
    "is_scipy_sparse",
    "is_sequence",
    "is_signed_integer_dtype",
    "is_sparse",
    "is_string_dtype",
    "is_string_or_object_np_dtype",
    "is_timedelta64_dtype",
    "is_timedelta64_ns_dtype",
    "is_unsigned_integer_dtype",
    "needs_i8_conversion",
    "pandas_dtype",
    "TD64NS_DTYPE",
    "validate_all_hashable",
]
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/concat.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility functions related to concat.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ cast,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas.util._exceptions import find_stack_level
16
+
17
+ from pandas.core.dtypes.astype import astype_array
18
+ from pandas.core.dtypes.cast import (
19
+ common_dtype_categorical_compat,
20
+ find_common_type,
21
+ np_find_common_type,
22
+ )
23
+ from pandas.core.dtypes.dtypes import CategoricalDtype
24
+ from pandas.core.dtypes.generic import (
25
+ ABCCategoricalIndex,
26
+ ABCSeries,
27
+ )
28
+
29
+ if TYPE_CHECKING:
30
+ from collections.abc import Sequence
31
+
32
+ from pandas._typing import (
33
+ ArrayLike,
34
+ AxisInt,
35
+ DtypeObj,
36
+ )
37
+
38
+ from pandas.core.arrays import (
39
+ Categorical,
40
+ ExtensionArray,
41
+ )
42
+
43
+
44
def _is_nonempty(x, axis) -> bool:
    # An array that lacks the requested axis (e.g. 1-d values when
    # concatenating along axis=1) always counts as nonempty; otherwise
    # check the length along that axis.
    return x.ndim <= axis or x.shape[axis] > 0
50
+
51
+
52
def concat_compat(
    to_concat: Sequence[ArrayLike], axis: AxisInt = 0, ea_compat_axis: bool = False
) -> ArrayLike:
    """
    provide concatenation of an array of arrays each of which is a single
    'normalized' dtypes (in that for example, if it's object, then it is a
    non-datetimelike and provide a combined dtype for the resulting array that
    preserves the overall dtype if possible)

    Parameters
    ----------
    to_concat : sequence of arrays
    axis : axis to provide concatenation
    ea_compat_axis : bool, default False
        For ExtensionArray compat, behave as if axis == 1 when determining
        whether to drop empty arrays.

    Returns
    -------
    a single array, preserving the combined dtypes
    """
    if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]):
        # fastpath! -- all dtypes identical, no casting needed
        obj = to_concat[0]
        if isinstance(obj, np.ndarray):
            to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
            return np.concatenate(to_concat_arrs, axis=axis)

        to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
        if ea_compat_axis:
            # We have 1D objects, that don't support axis keyword
            return obj._concat_same_type(to_concat_eas)
        elif axis == 0:
            return obj._concat_same_type(to_concat_eas)
        else:
            # e.g. DatetimeArray
            # NB: We are assuming here that ensure_wrapped_if_arraylike has
            #  been called where relevant.
            return obj._concat_same_type(
                # error: Unexpected keyword argument "axis" for "_concat_same_type"
                # of "ExtensionArray"
                to_concat_eas,
                axis=axis,  # type: ignore[call-arg]
            )

    # If all arrays are empty, there's nothing to convert, just short-cut to
    # the concatenation, #3121.
    #
    # Creating an empty array directly is tempting, but the winnings would be
    # marginal given that it would still require shape & dtype calculation and
    # np.concatenate which has them both implemented is compiled.
    orig = to_concat
    non_empties = [x for x in to_concat if _is_nonempty(x, axis)]
    if non_empties and axis == 0 and not ea_compat_axis:
        # ea_compat_axis see GH#39574
        to_concat = non_empties

    any_ea, kinds, target_dtype = _get_result_dtype(to_concat, non_empties)

    if len(to_concat) < len(orig):
        # some empties were dropped above; warn if that changed the result dtype
        _, _, alt_dtype = _get_result_dtype(orig, non_empties)
        if alt_dtype != target_dtype:
            # GH#39122
            warnings.warn(
                "The behavior of array concatenation with empty entries is "
                "deprecated. In a future version, this will no longer exclude "
                "empty items when determining the result dtype. "
                "To retain the old behavior, exclude the empty entries before "
                "the concat operation.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

    if target_dtype is not None:
        # cast everything to the common dtype before concatenating
        to_concat = [astype_array(arr, target_dtype, copy=False) for arr in to_concat]

    if not isinstance(to_concat[0], np.ndarray):
        # i.e. isinstance(to_concat[0], ExtensionArray)
        to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
        cls = type(to_concat[0])
        # GH#53640: eg. for datetime array, axis=1 but 0 is default
        # However, class method `_concat_same_type()` for some classes
        # may not support the `axis` keyword
        if ea_compat_axis or axis == 0:
            return cls._concat_same_type(to_concat_eas)
        else:
            return cls._concat_same_type(
                to_concat_eas,
                axis=axis,  # type: ignore[call-arg]
            )
    else:
        to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
        result = np.concatenate(to_concat_arrs, axis=axis)

        if not any_ea and "b" in kinds and result.dtype.kind in "iuf":
            # GH#39817 cast to object instead of casting bools to numeric
            result = result.astype(object, copy=False)
    return result
+
151
+
152
def _get_result_dtype(
    to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike]
) -> tuple[bool, set[str], DtypeObj | None]:
    """
    Determine the dtype the concatenation result should be cast to.

    Returns a 3-tuple: whether any input is an ExtensionArray, the set of
    dtype kind codes seen, and the common target dtype (None when the
    inputs can be concatenated without an explicit cast).
    """
    target_dtype = None

    dtypes = {obj.dtype for obj in to_concat}
    kinds = {obj.dtype.kind for obj in to_concat}

    any_ea = any(not isinstance(x, np.ndarray) for x in to_concat)
    if any_ea:
        # i.e. any ExtensionArrays

        # we ignore axis here, as internally concatting with EAs is always
        # for axis=0
        if len(dtypes) != 1:
            target_dtype = find_common_type([x.dtype for x in to_concat])
            target_dtype = common_dtype_categorical_compat(to_concat, target_dtype)

    elif not len(non_empties):
        # we have all empties, but may need to coerce the result dtype to
        # object if we have non-numeric type operands (numpy would otherwise
        # cast this to float)
        if len(kinds) != 1:
            if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}):
                # let numpy coerce
                pass
            else:
                # coerce to object
                target_dtype = np.dtype(object)
                kinds = {"o"}
    else:
        # all-numpy, at least one nonempty: use numpy's promotion rules
        # error: Argument 1 to "np_find_common_type" has incompatible type
        # "*Set[Union[ExtensionDtype, Any]]"; expected "dtype[Any]"
        target_dtype = np_find_common_type(*dtypes)  # type: ignore[arg-type]

    return any_ea, kinds, target_dtype
+ return any_ea, kinds, target_dtype
188
+
189
+
190
def union_categoricals(
    to_union, sort_categories: bool = False, ignore_order: bool = False
) -> Categorical:
    """
    Combine list-like of Categorical-like, unioning categories.

    All categories must have the same dtype.

    Parameters
    ----------
    to_union : list-like
        Categorical, CategoricalIndex, or Series with dtype='category'.
    sort_categories : bool, default False
        If true, resulting categories will be lexsorted, otherwise
        they will be ordered as they appear in the data.
    ignore_order : bool, default False
        If true, the ordered attribute of the Categoricals will be ignored.
        Results in an unordered categorical.

    Returns
    -------
    Categorical

    Raises
    ------
    TypeError
        - all inputs do not have the same dtype
        - all inputs do not have the same ordered property
        - all inputs are ordered and their categories are not identical
        - sort_categories=True and Categoricals are ordered
    ValueError
        Empty list of categoricals passed

    Notes
    -----
    To learn more about categories, see `link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__

    Examples
    --------
    The new categories will be the union of the categories being combined.

    >>> a = pd.Categorical(["b", "c"])
    >>> b = pd.Categorical(["a", "b"])
    >>> pd.api.types.union_categoricals([a, b])
    ['b', 'c', 'a', 'b']
    Categories (3, object): ['b', 'c', 'a']

    With `sort_categories=True`, the categories are lexsorted.

    >>> pd.api.types.union_categoricals([a, b], sort_categories=True)
    ['b', 'c', 'a', 'b']
    Categories (3, object): ['a', 'b', 'c']

    Ordered categoricals with different categories or orderings can be
    combined by using the `ignore_order=True` argument.

    >>> a = pd.Categorical(["a", "b", "c"], ordered=True)
    >>> b = pd.Categorical(["c", "b", "a"], ordered=True)
    >>> pd.api.types.union_categoricals([a, b], ignore_order=True)
    ['a', 'b', 'c', 'c', 'b', 'a']
    Categories (3, object): ['a', 'b', 'c']
    """
    from pandas import Categorical
    from pandas.core.arrays.categorical import recode_for_categories

    if len(to_union) == 0:
        raise ValueError("No Categoricals to union")

    def _maybe_unwrap(x):
        # accept CategoricalIndex / Series['category'] by unwrapping to
        # the underlying Categorical; reject anything else
        if isinstance(x, (ABCCategoricalIndex, ABCSeries)):
            return x._values
        elif isinstance(x, Categorical):
            return x
        else:
            raise TypeError("all components to combine must be Categorical")

    to_union = [_maybe_unwrap(x) for x in to_union]
    first = to_union[0]

    if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]):
        raise TypeError("dtype of categories must be the same")

    ordered = False
    if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]):
        # identical categories - fastpath
        categories = first.categories
        ordered = first.ordered

        # re-express every input's codes against `first`'s category order
        all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]
        new_codes = np.concatenate(all_codes)

        if sort_categories and not ignore_order and ordered:
            raise TypeError("Cannot use sort_categories=True with ordered Categoricals")

        if sort_categories and not categories.is_monotonic_increasing:
            categories = categories.sort_values()
            indexer = categories.get_indexer(first.categories)

            from pandas.core.algorithms import take_nd

            # remap the codes to the sorted category positions
            new_codes = take_nd(indexer, new_codes, fill_value=-1)
    elif ignore_order or all(not c.ordered for c in to_union):
        # different categories - union and recode
        cats = first.categories.append([c.categories for c in to_union[1:]])
        categories = cats.unique()
        if sort_categories:
            categories = categories.sort_values()

        new_codes = [
            recode_for_categories(c.codes, c.categories, categories) for c in to_union
        ]
        new_codes = np.concatenate(new_codes)
    else:
        # ordered - to show a proper error message
        if all(c.ordered for c in to_union):
            msg = "to union ordered Categoricals, all categories must be the same"
            raise TypeError(msg)
        raise TypeError("Categorical.ordered must be the same")

    if ignore_order:
        ordered = False

    dtype = CategoricalDtype(categories=categories, ordered=ordered)
    return Categorical._simple_new(new_codes, dtype=dtype)
+ return Categorical._simple_new(new_codes, dtype=dtype)
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py ADDED
@@ -0,0 +1,2348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Define extension dtypes.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from datetime import (
7
+ date,
8
+ datetime,
9
+ time,
10
+ timedelta,
11
+ )
12
+ from decimal import Decimal
13
+ import re
14
+ from typing import (
15
+ TYPE_CHECKING,
16
+ Any,
17
+ cast,
18
+ )
19
+ import warnings
20
+
21
+ import numpy as np
22
+ import pytz
23
+
24
+ from pandas._libs import (
25
+ lib,
26
+ missing as libmissing,
27
+ )
28
+ from pandas._libs.interval import Interval
29
+ from pandas._libs.properties import cache_readonly
30
+ from pandas._libs.tslibs import (
31
+ BaseOffset,
32
+ NaT,
33
+ NaTType,
34
+ Period,
35
+ Timedelta,
36
+ Timestamp,
37
+ timezones,
38
+ to_offset,
39
+ tz_compare,
40
+ )
41
+ from pandas._libs.tslibs.dtypes import (
42
+ PeriodDtypeBase,
43
+ abbrev_to_npy_unit,
44
+ )
45
+ from pandas._libs.tslibs.offsets import BDay
46
+ from pandas.compat import pa_version_under10p1
47
+ from pandas.errors import PerformanceWarning
48
+ from pandas.util._exceptions import find_stack_level
49
+
50
+ from pandas.core.dtypes.base import (
51
+ ExtensionDtype,
52
+ StorageExtensionDtype,
53
+ register_extension_dtype,
54
+ )
55
+ from pandas.core.dtypes.generic import (
56
+ ABCCategoricalIndex,
57
+ ABCIndex,
58
+ ABCRangeIndex,
59
+ )
60
+ from pandas.core.dtypes.inference import (
61
+ is_bool,
62
+ is_list_like,
63
+ )
64
+
65
+ from pandas.util import capitalize_first_letter
66
+
67
+ if not pa_version_under10p1:
68
+ import pyarrow as pa
69
+
70
+ if TYPE_CHECKING:
71
+ from collections.abc import MutableMapping
72
+ from datetime import tzinfo
73
+
74
+ import pyarrow as pa # noqa: TCH004
75
+
76
+ from pandas._typing import (
77
+ Dtype,
78
+ DtypeObj,
79
+ IntervalClosedType,
80
+ Ordered,
81
+ Self,
82
+ npt,
83
+ type_t,
84
+ )
85
+
86
+ from pandas import (
87
+ Categorical,
88
+ CategoricalIndex,
89
+ DatetimeIndex,
90
+ Index,
91
+ IntervalIndex,
92
+ PeriodIndex,
93
+ )
94
+ from pandas.core.arrays import (
95
+ BaseMaskedArray,
96
+ DatetimeArray,
97
+ IntervalArray,
98
+ NumpyExtensionArray,
99
+ PeriodArray,
100
+ SparseArray,
101
+ )
102
+ from pandas.core.arrays.arrow import ArrowExtensionArray
103
+
104
+ str_type = str
105
+
106
+
107
+ class PandasExtensionDtype(ExtensionDtype):
108
+ """
109
+ A np.dtype duck-typed class, suitable for holding a custom dtype.
110
+
111
+ THIS IS NOT A REAL NUMPY DTYPE
112
+ """
113
+
114
+ type: Any
115
+ kind: Any
116
+ # The Any type annotations above are here only because mypy seems to have a
117
+ # problem dealing with multiple inheritance from PandasExtensionDtype
118
+ # and ExtensionDtype's @properties in the subclasses below. The kind and
119
+ # type variables in those subclasses are explicitly typed below.
120
+ subdtype = None
121
+ str: str_type
122
+ num = 100
123
+ shape: tuple[int, ...] = ()
124
+ itemsize = 8
125
+ base: DtypeObj | None = None
126
+ isbuiltin = 0
127
+ isnative = 0
128
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
129
+
130
+ def __repr__(self) -> str_type:
131
+ """
132
+ Return a string representation for a particular object.
133
+ """
134
+ return str(self)
135
+
136
+ def __hash__(self) -> int:
137
+ raise NotImplementedError("sub-classes should implement an __hash__ method")
138
+
139
+ def __getstate__(self) -> dict[str_type, Any]:
140
+ # pickle support; we don't want to pickle the cache
141
+ return {k: getattr(self, k, None) for k in self._metadata}
142
+
143
+ @classmethod
144
+ def reset_cache(cls) -> None:
145
+ """clear the cache"""
146
+ cls._cache_dtypes = {}
147
+
148
+
149
+ class CategoricalDtypeType(type):
150
+ """
151
+ the type of CategoricalDtype, this metaclass determines subclass ability
152
+ """
153
+
154
+
155
+ @register_extension_dtype
156
+ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
157
+ """
158
+ Type for categorical data with the categories and orderedness.
159
+
160
+ Parameters
161
+ ----------
162
+ categories : sequence, optional
163
+ Must be unique, and must not contain any nulls.
164
+ The categories are stored in an Index,
165
+ and if an index is provided the dtype of that index will be used.
166
+ ordered : bool or None, default False
167
+ Whether or not this categorical is treated as a ordered categorical.
168
+ None can be used to maintain the ordered value of existing categoricals when
169
+ used in operations that combine categoricals, e.g. astype, and will resolve to
170
+ False if there is no existing ordered to maintain.
171
+
172
+ Attributes
173
+ ----------
174
+ categories
175
+ ordered
176
+
177
+ Methods
178
+ -------
179
+ None
180
+
181
+ See Also
182
+ --------
183
+ Categorical : Represent a categorical variable in classic R / S-plus fashion.
184
+
185
+ Notes
186
+ -----
187
+ This class is useful for specifying the type of a ``Categorical``
188
+ independent of the values. See :ref:`categorical.categoricaldtype`
189
+ for more.
190
+
191
+ Examples
192
+ --------
193
+ >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
194
+ >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
195
+ 0 a
196
+ 1 b
197
+ 2 a
198
+ 3 NaN
199
+ dtype: category
200
+ Categories (2, object): ['b' < 'a']
201
+
202
+ An empty CategoricalDtype with a specific dtype can be created
203
+ by providing an empty index. As follows,
204
+
205
+ >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
206
+ dtype('<M8[ns]')
207
+ """
208
+
209
+ # TODO: Document public vs. private API
210
+ name = "category"
211
+ type: type[CategoricalDtypeType] = CategoricalDtypeType
212
+ kind: str_type = "O"
213
+ str = "|O08"
214
+ base = np.dtype("O")
215
+ _metadata = ("categories", "ordered")
216
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
217
+ _supports_2d = False
218
+ _can_fast_transpose = False
219
+
220
+ def __init__(self, categories=None, ordered: Ordered = False) -> None:
221
+ self._finalize(categories, ordered, fastpath=False)
222
+
223
+ @classmethod
224
+ def _from_fastpath(
225
+ cls, categories=None, ordered: bool | None = None
226
+ ) -> CategoricalDtype:
227
+ self = cls.__new__(cls)
228
+ self._finalize(categories, ordered, fastpath=True)
229
+ return self
230
+
231
+ @classmethod
232
+ def _from_categorical_dtype(
233
+ cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None
234
+ ) -> CategoricalDtype:
235
+ if categories is ordered is None:
236
+ return dtype
237
+ if categories is None:
238
+ categories = dtype.categories
239
+ if ordered is None:
240
+ ordered = dtype.ordered
241
+ return cls(categories, ordered)
242
+
243
+ @classmethod
244
+ def _from_values_or_dtype(
245
+ cls,
246
+ values=None,
247
+ categories=None,
248
+ ordered: bool | None = None,
249
+ dtype: Dtype | None = None,
250
+ ) -> CategoricalDtype:
251
+ """
252
+ Construct dtype from the input parameters used in :class:`Categorical`.
253
+
254
+ This constructor method specifically does not do the factorization
255
+ step, if that is needed to find the categories. This constructor may
256
+ therefore return ``CategoricalDtype(categories=None, ordered=None)``,
257
+ which may not be useful. Additional steps may therefore have to be
258
+ taken to create the final dtype.
259
+
260
+ The return dtype is specified from the inputs in this prioritized
261
+ order:
262
+ 1. if dtype is a CategoricalDtype, return dtype
263
+ 2. if dtype is the string 'category', create a CategoricalDtype from
264
+ the supplied categories and ordered parameters, and return that.
265
+ 3. if values is a categorical, use value.dtype, but override it with
266
+ categories and ordered if either/both of those are not None.
267
+ 4. if dtype is None and values is not a categorical, construct the
268
+ dtype from categories and ordered, even if either of those is None.
269
+
270
+ Parameters
271
+ ----------
272
+ values : list-like, optional
273
+ The list-like must be 1-dimensional.
274
+ categories : list-like, optional
275
+ Categories for the CategoricalDtype.
276
+ ordered : bool, optional
277
+ Designating if the categories are ordered.
278
+ dtype : CategoricalDtype or the string "category", optional
279
+ If ``CategoricalDtype``, cannot be used together with
280
+ `categories` or `ordered`.
281
+
282
+ Returns
283
+ -------
284
+ CategoricalDtype
285
+
286
+ Examples
287
+ --------
288
+ >>> pd.CategoricalDtype._from_values_or_dtype()
289
+ CategoricalDtype(categories=None, ordered=None, categories_dtype=None)
290
+ >>> pd.CategoricalDtype._from_values_or_dtype(
291
+ ... categories=['a', 'b'], ordered=True
292
+ ... )
293
+ CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
294
+ >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
295
+ >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
296
+ >>> c = pd.Categorical([0, 1], dtype=dtype1)
297
+ >>> pd.CategoricalDtype._from_values_or_dtype(
298
+ ... c, ['x', 'y'], ordered=True, dtype=dtype2
299
+ ... )
300
+ Traceback (most recent call last):
301
+ ...
302
+ ValueError: Cannot specify `categories` or `ordered` together with
303
+ `dtype`.
304
+
305
+ The supplied dtype takes precedence over values' dtype:
306
+
307
+ >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
308
+ CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)
309
+ """
310
+
311
+ if dtype is not None:
312
+ # The dtype argument takes precedence over values.dtype (if any)
313
+ if isinstance(dtype, str):
314
+ if dtype == "category":
315
+ if ordered is None and cls.is_dtype(values):
316
+ # GH#49309 preserve orderedness
317
+ ordered = values.dtype.ordered
318
+
319
+ dtype = CategoricalDtype(categories, ordered)
320
+ else:
321
+ raise ValueError(f"Unknown dtype {repr(dtype)}")
322
+ elif categories is not None or ordered is not None:
323
+ raise ValueError(
324
+ "Cannot specify `categories` or `ordered` together with `dtype`."
325
+ )
326
+ elif not isinstance(dtype, CategoricalDtype):
327
+ raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}")
328
+ elif cls.is_dtype(values):
329
+ # If no "dtype" was passed, use the one from "values", but honor
330
+ # the "ordered" and "categories" arguments
331
+ dtype = values.dtype._from_categorical_dtype(
332
+ values.dtype, categories, ordered
333
+ )
334
+ else:
335
+ # If dtype=None and values is not categorical, create a new dtype.
336
+ # Note: This could potentially have categories=None and
337
+ # ordered=None.
338
+ dtype = CategoricalDtype(categories, ordered)
339
+
340
+ return cast(CategoricalDtype, dtype)
341
+
342
+ @classmethod
343
+ def construct_from_string(cls, string: str_type) -> CategoricalDtype:
344
+ """
345
+ Construct a CategoricalDtype from a string.
346
+
347
+ Parameters
348
+ ----------
349
+ string : str
350
+ Must be the string "category" in order to be successfully constructed.
351
+
352
+ Returns
353
+ -------
354
+ CategoricalDtype
355
+ Instance of the dtype.
356
+
357
+ Raises
358
+ ------
359
+ TypeError
360
+ If a CategoricalDtype cannot be constructed from the input.
361
+ """
362
+ if not isinstance(string, str):
363
+ raise TypeError(
364
+ f"'construct_from_string' expects a string, got {type(string)}"
365
+ )
366
+ if string != cls.name:
367
+ raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
368
+
369
+ # need ordered=None to ensure that operations specifying dtype="category" don't
370
+ # override the ordered value for existing categoricals
371
+ return cls(ordered=None)
372
+
373
+ def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
374
+ if ordered is not None:
375
+ self.validate_ordered(ordered)
376
+
377
+ if categories is not None:
378
+ categories = self.validate_categories(categories, fastpath=fastpath)
379
+
380
+ self._categories = categories
381
+ self._ordered = ordered
382
+
383
+ def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:
384
+ # for pickle compat. __get_state__ is defined in the
385
+ # PandasExtensionDtype superclass and uses the public properties to
386
+ # pickle -> need to set the settable private ones here (see GH26067)
387
+ self._categories = state.pop("categories", None)
388
+ self._ordered = state.pop("ordered", False)
389
+
390
+ def __hash__(self) -> int:
391
+ # _hash_categories returns a uint64, so use the negative
392
+ # space for when we have unknown categories to avoid a conflict
393
+ if self.categories is None:
394
+ if self.ordered:
395
+ return -1
396
+ else:
397
+ return -2
398
+ # We *do* want to include the real self.ordered here
399
+ return int(self._hash_categories)
400
+
401
+ def __eq__(self, other: object) -> bool:
402
+ """
403
+ Rules for CDT equality:
404
+ 1) Any CDT is equal to the string 'category'
405
+ 2) Any CDT is equal to itself
406
+ 3) Any CDT is equal to a CDT with categories=None regardless of ordered
407
+ 4) A CDT with ordered=True is only equal to another CDT with
408
+ ordered=True and identical categories in the same order
409
+ 5) A CDT with ordered={False, None} is only equal to another CDT with
410
+ ordered={False, None} and identical categories, but same order is
411
+ not required. There is no distinction between False/None.
412
+ 6) Any other comparison returns False
413
+ """
414
+ if isinstance(other, str):
415
+ return other == self.name
416
+ elif other is self:
417
+ return True
418
+ elif not (hasattr(other, "ordered") and hasattr(other, "categories")):
419
+ return False
420
+ elif self.categories is None or other.categories is None:
421
+ # For non-fully-initialized dtypes, these are only equal to
422
+ # - the string "category" (handled above)
423
+ # - other CategoricalDtype with categories=None
424
+ return self.categories is other.categories
425
+ elif self.ordered or other.ordered:
426
+ # At least one has ordered=True; equal if both have ordered=True
427
+ # and the same values for categories in the same order.
428
+ return (self.ordered == other.ordered) and self.categories.equals(
429
+ other.categories
430
+ )
431
+ else:
432
+ # Neither has ordered=True; equal if both have the same categories,
433
+ # but same order is not necessary. There is no distinction between
434
+ # ordered=False and ordered=None: CDT(., False) and CDT(., None)
435
+ # will be equal if they have the same categories.
436
+ left = self.categories
437
+ right = other.categories
438
+
439
+ # GH#36280 the ordering of checks here is for performance
440
+ if not left.dtype == right.dtype:
441
+ return False
442
+
443
+ if len(left) != len(right):
444
+ return False
445
+
446
+ if self.categories.equals(other.categories):
447
+ # Check and see if they happen to be identical categories
448
+ return True
449
+
450
+ if left.dtype != object:
451
+ # Faster than calculating hash
452
+ indexer = left.get_indexer(right)
453
+ # Because left and right have the same length and are unique,
454
+ # `indexer` not having any -1s implies that there is a
455
+ # bijection between `left` and `right`.
456
+ return (indexer != -1).all()
457
+
458
+ # With object-dtype we need a comparison that identifies
459
+ # e.g. int(2) as distinct from float(2)
460
+ return set(left) == set(right)
461
+
462
+ def __repr__(self) -> str_type:
463
+ if self.categories is None:
464
+ data = "None"
465
+ dtype = "None"
466
+ else:
467
+ data = self.categories._format_data(name=type(self).__name__)
468
+ if isinstance(self.categories, ABCRangeIndex):
469
+ data = str(self.categories._range)
470
+ data = data.rstrip(", ")
471
+ dtype = self.categories.dtype
472
+
473
+ return (
474
+ f"CategoricalDtype(categories={data}, ordered={self.ordered}, "
475
+ f"categories_dtype={dtype})"
476
+ )
477
+
478
+ @cache_readonly
479
+ def _hash_categories(self) -> int:
480
+ from pandas.core.util.hashing import (
481
+ combine_hash_arrays,
482
+ hash_array,
483
+ hash_tuples,
484
+ )
485
+
486
+ categories = self.categories
487
+ ordered = self.ordered
488
+
489
+ if len(categories) and isinstance(categories[0], tuple):
490
+ # assumes if any individual category is a tuple, then all our. ATM
491
+ # I don't really want to support just some of the categories being
492
+ # tuples.
493
+ cat_list = list(categories) # breaks if a np.array of categories
494
+ cat_array = hash_tuples(cat_list)
495
+ else:
496
+ if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
497
+ # TODO: hash_array doesn't handle mixed types. It casts
498
+ # everything to a str first, which means we treat
499
+ # {'1', '2'} the same as {'1', 2}
500
+ # find a better solution
501
+ hashed = hash((tuple(categories), ordered))
502
+ return hashed
503
+
504
+ if DatetimeTZDtype.is_dtype(categories.dtype):
505
+ # Avoid future warning.
506
+ categories = categories.view("datetime64[ns]")
507
+
508
+ cat_array = hash_array(np.asarray(categories), categorize=False)
509
+ if ordered:
510
+ cat_array = np.vstack(
511
+ [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
512
+ )
513
+ else:
514
+ cat_array = np.array([cat_array])
515
+ combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))
516
+ return np.bitwise_xor.reduce(combined_hashed)
517
+
518
+ @classmethod
519
+ def construct_array_type(cls) -> type_t[Categorical]:
520
+ """
521
+ Return the array type associated with this dtype.
522
+
523
+ Returns
524
+ -------
525
+ type
526
+ """
527
+ from pandas import Categorical
528
+
529
+ return Categorical
530
+
531
+ @staticmethod
532
+ def validate_ordered(ordered: Ordered) -> None:
533
+ """
534
+ Validates that we have a valid ordered parameter. If
535
+ it is not a boolean, a TypeError will be raised.
536
+
537
+ Parameters
538
+ ----------
539
+ ordered : object
540
+ The parameter to be verified.
541
+
542
+ Raises
543
+ ------
544
+ TypeError
545
+ If 'ordered' is not a boolean.
546
+ """
547
+ if not is_bool(ordered):
548
+ raise TypeError("'ordered' must either be 'True' or 'False'")
549
+
550
+ @staticmethod
551
+ def validate_categories(categories, fastpath: bool = False) -> Index:
552
+ """
553
+ Validates that we have good categories
554
+
555
+ Parameters
556
+ ----------
557
+ categories : array-like
558
+ fastpath : bool
559
+ Whether to skip nan and uniqueness checks
560
+
561
+ Returns
562
+ -------
563
+ categories : Index
564
+ """
565
+ from pandas.core.indexes.base import Index
566
+
567
+ if not fastpath and not is_list_like(categories):
568
+ raise TypeError(
569
+ f"Parameter 'categories' must be list-like, was {repr(categories)}"
570
+ )
571
+ if not isinstance(categories, ABCIndex):
572
+ categories = Index._with_infer(categories, tupleize_cols=False)
573
+
574
+ if not fastpath:
575
+ if categories.hasnans:
576
+ raise ValueError("Categorical categories cannot be null")
577
+
578
+ if not categories.is_unique:
579
+ raise ValueError("Categorical categories must be unique")
580
+
581
+ if isinstance(categories, ABCCategoricalIndex):
582
+ categories = categories.categories
583
+
584
+ return categories
585
+
586
+ def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:
587
+ """
588
+ Returns a CategoricalDtype with categories and ordered taken from dtype
589
+ if specified, otherwise falling back to self if unspecified
590
+
591
+ Parameters
592
+ ----------
593
+ dtype : CategoricalDtype
594
+
595
+ Returns
596
+ -------
597
+ new_dtype : CategoricalDtype
598
+ """
599
+ if isinstance(dtype, str) and dtype == "category":
600
+ # dtype='category' should not change anything
601
+ return self
602
+ elif not self.is_dtype(dtype):
603
+ raise ValueError(
604
+ f"a CategoricalDtype must be passed to perform an update, "
605
+ f"got {repr(dtype)}"
606
+ )
607
+ else:
608
+ # from here on, dtype is a CategoricalDtype
609
+ dtype = cast(CategoricalDtype, dtype)
610
+
611
+ # update categories/ordered unless they've been explicitly passed as None
612
+ new_categories = (
613
+ dtype.categories if dtype.categories is not None else self.categories
614
+ )
615
+ new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered
616
+
617
+ return CategoricalDtype(new_categories, new_ordered)
618
+
619
+ @property
620
+ def categories(self) -> Index:
621
+ """
622
+ An ``Index`` containing the unique categories allowed.
623
+
624
+ Examples
625
+ --------
626
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
627
+ >>> cat_type.categories
628
+ Index(['a', 'b'], dtype='object')
629
+ """
630
+ return self._categories
631
+
632
+ @property
633
+ def ordered(self) -> Ordered:
634
+ """
635
+ Whether the categories have an ordered relationship.
636
+
637
+ Examples
638
+ --------
639
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
640
+ >>> cat_type.ordered
641
+ True
642
+
643
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
644
+ >>> cat_type.ordered
645
+ False
646
+ """
647
+ return self._ordered
648
+
649
+ @property
650
+ def _is_boolean(self) -> bool:
651
+ from pandas.core.dtypes.common import is_bool_dtype
652
+
653
+ return is_bool_dtype(self.categories)
654
+
655
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
656
+ # check if we have all categorical dtype with identical categories
657
+ if all(isinstance(x, CategoricalDtype) for x in dtypes):
658
+ first = dtypes[0]
659
+ if all(first == other for other in dtypes[1:]):
660
+ return first
661
+
662
+ # special case non-initialized categorical
663
+ # TODO we should figure out the expected return value in general
664
+ non_init_cats = [
665
+ isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
666
+ ]
667
+ if all(non_init_cats):
668
+ return self
669
+ elif any(non_init_cats):
670
+ return None
671
+
672
+ # categorical is aware of Sparse -> extract sparse subdtypes
673
+ dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
674
+ # extract the categories' dtype
675
+ non_cat_dtypes = [
676
+ x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
677
+ ]
678
+ # TODO should categorical always give an answer?
679
+ from pandas.core.dtypes.cast import find_common_type
680
+
681
+ return find_common_type(non_cat_dtypes)
682
+
683
+ @cache_readonly
684
+ def index_class(self) -> type_t[CategoricalIndex]:
685
+ from pandas import CategoricalIndex
686
+
687
+ return CategoricalIndex
688
+
689
+
690
+ @register_extension_dtype
691
+ class DatetimeTZDtype(PandasExtensionDtype):
692
+ """
693
+ An ExtensionDtype for timezone-aware datetime data.
694
+
695
+ **This is not an actual numpy dtype**, but a duck type.
696
+
697
+ Parameters
698
+ ----------
699
+ unit : str, default "ns"
700
+ The precision of the datetime data. Currently limited
701
+ to ``"ns"``.
702
+ tz : str, int, or datetime.tzinfo
703
+ The timezone.
704
+
705
+ Attributes
706
+ ----------
707
+ unit
708
+ tz
709
+
710
+ Methods
711
+ -------
712
+ None
713
+
714
+ Raises
715
+ ------
716
+ ZoneInfoNotFoundError
717
+ When the requested timezone cannot be found.
718
+
719
+ Examples
720
+ --------
721
+ >>> from zoneinfo import ZoneInfo
722
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
723
+ datetime64[ns, UTC]
724
+
725
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
726
+ datetime64[ns, Europe/Paris]
727
+ """
728
+
729
+ type: type[Timestamp] = Timestamp
730
+ kind: str_type = "M"
731
+ num = 101
732
+ _metadata = ("unit", "tz")
733
+ _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
734
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
735
+ _supports_2d = True
736
+ _can_fast_transpose = True
737
+
738
+ @property
739
+ def na_value(self) -> NaTType:
740
+ return NaT
741
+
742
+ @cache_readonly
743
+ def base(self) -> DtypeObj: # type: ignore[override]
744
+ return np.dtype(f"M8[{self.unit}]")
745
+
746
+ # error: Signature of "str" incompatible with supertype "PandasExtensionDtype"
747
+ @cache_readonly
748
+ def str(self) -> str: # type: ignore[override]
749
+ return f"|M8[{self.unit}]"
750
+
751
+ def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:
752
+ if isinstance(unit, DatetimeTZDtype):
753
+ # error: "str" has no attribute "tz"
754
+ unit, tz = unit.unit, unit.tz # type: ignore[attr-defined]
755
+
756
+ if unit != "ns":
757
+ if isinstance(unit, str) and tz is None:
758
+ # maybe a string like datetime64[ns, tz], which we support for
759
+ # now.
760
+ result = type(self).construct_from_string(unit)
761
+ unit = result.unit
762
+ tz = result.tz
763
+ msg = (
764
+ f"Passing a dtype alias like 'datetime64[ns, {tz}]' "
765
+ "to DatetimeTZDtype is no longer supported. Use "
766
+ "'DatetimeTZDtype.construct_from_string()' instead."
767
+ )
768
+ raise ValueError(msg)
769
+ if unit not in ["s", "ms", "us", "ns"]:
770
+ raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")
771
+
772
+ if tz:
773
+ tz = timezones.maybe_get_tz(tz)
774
+ tz = timezones.tz_standardize(tz)
775
+ elif tz is not None:
776
+ raise pytz.UnknownTimeZoneError(tz)
777
+ if tz is None:
778
+ raise TypeError("A 'tz' is required.")
779
+
780
+ self._unit = unit
781
+ self._tz = tz
782
+
783
+ @cache_readonly
784
+ def _creso(self) -> int:
785
+ """
786
+ The NPY_DATETIMEUNIT corresponding to this dtype's resolution.
787
+ """
788
+ return abbrev_to_npy_unit(self.unit)
789
+
790
+ @property
791
+ def unit(self) -> str_type:
792
+ """
793
+ The precision of the datetime data.
794
+
795
+ Examples
796
+ --------
797
+ >>> from zoneinfo import ZoneInfo
798
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
799
+ >>> dtype.unit
800
+ 'ns'
801
+ """
802
+ return self._unit
803
+
804
+ @property
805
+ def tz(self) -> tzinfo:
806
+ """
807
+ The timezone.
808
+
809
+ Examples
810
+ --------
811
+ >>> from zoneinfo import ZoneInfo
812
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
813
+ >>> dtype.tz
814
+ zoneinfo.ZoneInfo(key='America/Los_Angeles')
815
+ """
816
+ return self._tz
817
+
818
+ @classmethod
819
+ def construct_array_type(cls) -> type_t[DatetimeArray]:
820
+ """
821
+ Return the array type associated with this dtype.
822
+
823
+ Returns
824
+ -------
825
+ type
826
+ """
827
+ from pandas.core.arrays import DatetimeArray
828
+
829
+ return DatetimeArray
830
+
831
+ @classmethod
832
+ def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
833
+ """
834
+ Construct a DatetimeTZDtype from a string.
835
+
836
+ Parameters
837
+ ----------
838
+ string : str
839
+ The string alias for this DatetimeTZDtype.
840
+ Should be formatted like ``datetime64[ns, <tz>]``,
841
+ where ``<tz>`` is the timezone name.
842
+
843
+ Examples
844
+ --------
845
+ >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
846
+ datetime64[ns, UTC]
847
+ """
848
+ if not isinstance(string, str):
849
+ raise TypeError(
850
+ f"'construct_from_string' expects a string, got {type(string)}"
851
+ )
852
+
853
+ msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
854
+ match = cls._match.match(string)
855
+ if match:
856
+ d = match.groupdict()
857
+ try:
858
+ return cls(unit=d["unit"], tz=d["tz"])
859
+ except (KeyError, TypeError, ValueError) as err:
860
+ # KeyError if maybe_get_tz tries and fails to get a
861
+ # pytz timezone (actually pytz.UnknownTimeZoneError).
862
+ # TypeError if we pass a nonsense tz;
863
+ # ValueError if we pass a unit other than "ns"
864
+ raise TypeError(msg) from err
865
+ raise TypeError(msg)
866
+
867
    def __str__(self) -> str_type:
        # e.g. "datetime64[ns, UTC]"
        return f"datetime64[{self.unit}, {self.tz}]"
869
+
870
    @property
    def name(self) -> str_type:
        """A string representation of the dtype, e.g. ``datetime64[ns, UTC]``."""
        # Delegates to __str__ so the two never disagree.
        return str(self)
874
+
875
    def __hash__(self) -> int:
        # make myself hashable
        # TODO: update this.
        # Hashing the string form keeps hash() consistent with __eq__,
        # which also compares against the string alias (via ``name``).
        return hash(str(self))
879
+
880
+ def __eq__(self, other: object) -> bool:
881
+ if isinstance(other, str):
882
+ if other.startswith("M8["):
883
+ other = f"datetime64[{other[3:]}"
884
+ return other == self.name
885
+
886
+ return (
887
+ isinstance(other, DatetimeTZDtype)
888
+ and self.unit == other.unit
889
+ and tz_compare(self.tz, other.tz)
890
+ )
891
+
892
    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:
        """
        Construct DatetimeArray from pyarrow Array/ChunkedArray.

        Note: If the units in the pyarrow Array are the same as this
        DatetimeDtype, then values corresponding to the integer representation
        of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
        are converted to ``NaT``, regardless of the null indicator in the
        pyarrow array.

        Parameters
        ----------
        array : pyarrow.Array or pyarrow.ChunkedArray
            The Arrow array to convert to DatetimeArray.

        Returns
        -------
        extension array : DatetimeArray
        """
        import pyarrow

        from pandas.core.arrays import DatetimeArray

        # Cast to this dtype's resolution first; safe=True makes pyarrow
        # raise rather than silently truncate on lossy conversions.
        array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)

        if isinstance(array, pyarrow.Array):
            # zero_copy_only=False: a copy is required when nulls are present.
            np_arr = array.to_numpy(zero_copy_only=False)
        else:
            np_arr = array.to_numpy()

        return DatetimeArray._simple_new(np_arr, dtype=self)
923
+
924
    def __setstate__(self, state) -> None:
        # for pickle compat. __get_state__ is defined in the
        # PandasExtensionDtype superclass and uses the public properties to
        # pickle -> need to set the settable private ones here (see GH26067)
        self._tz = state["tz"]
        self._unit = state["unit"]
930
+
931
    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # Only combine when every dtype is tz-aware with the *same* tz;
        # anything else falls through to the superclass logic.
        if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes):
            # np.max over the numpy base dtypes — presumably selects the
            # common (coarsest) resolution; confirm datetime64 dtype ordering.
            np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]])
            unit = np.datetime_data(np_dtype)[0]
            return type(self)(unit=unit, tz=self.tz)
        return super()._get_common_dtype(dtypes)
937
+
938
    @cache_readonly
    def index_class(self) -> type_t[DatetimeIndex]:
        # Index subclass used for data of this dtype; computed once and cached.
        from pandas import DatetimeIndex

        return DatetimeIndex
943
+
944
+
945
@register_extension_dtype
class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):
    """
    An ExtensionDtype for Period data.

    **This is not an actual numpy dtype**, but a duck type.

    Parameters
    ----------
    freq : str or DateOffset
        The frequency of this PeriodDtype.

    Attributes
    ----------
    freq

    Methods
    -------
    None

    Examples
    --------
    >>> pd.PeriodDtype(freq='D')
    period[D]

    >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
    period[M]
    """

    # Scalar type and numpy-compatibility attributes (object-backed duck dtype).
    type: type[Period] = Period
    kind: str_type = "O"
    str = "|O08"
    base = np.dtype("O")
    num = 102
    _metadata = ("freq",)
    # Matches e.g. "period[D]" / "Period[D]", capturing the freq string.
    _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
    # error: Incompatible types in assignment (expression has type
    # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
    # defined the type as "Dict[str, PandasExtensionDtype]") [assignment]
    _cache_dtypes: dict[BaseOffset, int] = {}  # type: ignore[assignment]
    __hash__ = PeriodDtypeBase.__hash__
    _freq: BaseOffset
    _supports_2d = True
    _can_fast_transpose = True

    def __new__(cls, freq) -> PeriodDtype:  # noqa: PYI034
        """
        Parameters
        ----------
        freq : PeriodDtype, BaseOffset, or string

        Raises
        ------
        TypeError
            If ``freq`` cannot be interpreted as a period frequency.
        """
        # __new__ (not __init__) so passing an existing PeriodDtype can
        # short-circuit and return it unchanged.
        if isinstance(freq, PeriodDtype):
            return freq

        if not isinstance(freq, BaseOffset):
            freq = cls._parse_dtype_strict(freq)

        if isinstance(freq, BDay):
            # GH#53446
            # TODO(3.0): enforcing this will close GH#10575
            warnings.warn(
                "PeriodDtype[B] is deprecated and will be removed in a future "
                "version. Use a DatetimeIndex with freq='B' instead",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        # Memoize the freq -> dtype_code lookup on the class.
        try:
            dtype_code = cls._cache_dtypes[freq]
        except KeyError:
            dtype_code = freq._period_dtype_code
            cls._cache_dtypes[freq] = dtype_code
        u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
        u._freq = freq
        return u

    def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]:
        # Pickle via the string name; __new__ reconstructs from it.
        return type(self), (self.name,)

    @property
    def freq(self) -> BaseOffset:
        """
        The frequency object of this PeriodDtype.

        Examples
        --------
        >>> dtype = pd.PeriodDtype(freq='D')
        >>> dtype.freq
        <Day>
        """
        return self._freq

    @classmethod
    def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
        # Convert a frequency string (optionally wrapped as "period[...]")
        # to a BaseOffset; raises TypeError for anything unparseable.
        if isinstance(freq, str):  # note: freq is already of type str!
            if freq.startswith(("Period[", "period[")):
                m = cls._match.search(freq)
                if m is not None:
                    freq = m.group("freq")

            freq_offset = to_offset(freq, is_period=True)
            if freq_offset is not None:
                return freq_offset

        raise TypeError(
            "PeriodDtype argument should be string or BaseOffset, "
            f"got {type(freq).__name__}"
        )

    @classmethod
    def construct_from_string(cls, string: str_type) -> PeriodDtype:
        """
        Strict construction from a string, raise a TypeError if not
        possible
        """
        if (
            isinstance(string, str)
            and (string.startswith(("period[", "Period[")))
            or isinstance(string, BaseOffset)
        ):
            # do not parse string like U as period[U]
            # avoid tuple to be regarded as freq
            try:
                return cls(freq=string)
            except ValueError:
                pass
        if isinstance(string, str):
            msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
        else:
            msg = f"'construct_from_string' expects a string, got {type(string)}"
        raise TypeError(msg)

    def __str__(self) -> str_type:
        return self.name

    @property
    def name(self) -> str_type:
        # e.g. "period[D]"
        return f"period[{self._freqstr}]"

    @property
    def na_value(self) -> NaTType:
        # Periods use NaT as their missing-value sentinel.
        return NaT

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            # Accept both "period[D]" and "Period[D]" spellings.
            return other in [self.name, capitalize_first_letter(self.name)]

        return super().__eq__(other)

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Return a boolean if we if the passed type is an actual dtype that we
        can match (via string or type)
        """
        if isinstance(dtype, str):
            # PeriodDtype can be instantiated from freq string like "U",
            # but doesn't regard freq str like "U" as dtype.
            if dtype.startswith(("period[", "Period[")):
                try:
                    return cls._parse_dtype_strict(dtype) is not None
                except ValueError:
                    return False
            else:
                return False
        return super().is_dtype(dtype)

    @classmethod
    def construct_array_type(cls) -> type_t[PeriodArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import PeriodArray

        return PeriodArray

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
        """
        Construct PeriodArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays import PeriodArray
        from pandas.core.arrays.arrow._arrow_utils import (
            pyarrow_array_to_numpy_and_mask,
        )

        # Normalize to a list of chunks so both Array and ChunkedArray
        # take the same code path.
        if isinstance(array, pyarrow.Array):
            chunks = [array]
        else:
            chunks = array.chunks

        results = []
        for arr in chunks:
            data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))
            parr = PeriodArray(data.copy(), dtype=self, copy=False)
            # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
            # expected type "Union[int, Sequence[int], Sequence[bool], slice]"
            parr[~mask] = NaT  # type: ignore[index]
            results.append(parr)

        if not results:
            # Empty input -> empty PeriodArray of this dtype.
            return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)
        return PeriodArray._concat_same_type(results)

    @cache_readonly
    def index_class(self) -> type_t[PeriodIndex]:
        # Index subclass used for data of this dtype; computed once and cached.
        from pandas import PeriodIndex

        return PeriodIndex
1162
+
1163
+
1164
@register_extension_dtype
class IntervalDtype(PandasExtensionDtype):
    """
    An ExtensionDtype for Interval data.

    **This is not an actual numpy dtype**, but a duck type.

    Parameters
    ----------
    subtype : str, np.dtype
        The dtype of the Interval bounds.

    Attributes
    ----------
    subtype

    Methods
    -------
    None

    Examples
    --------
    >>> pd.IntervalDtype(subtype='int64', closed='both')
    interval[int64, both]
    """

    name = "interval"
    kind: str_type = "O"
    str = "|O08"
    base = np.dtype("O")
    num = 103
    _metadata = (
        "subtype",
        "closed",
    )

    # Matches e.g. "interval[int64, right]"; the closed part is optional.
    _match = re.compile(
        r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"
        r"(, (?P<closed>(right|left|both|neither)))?\]"
    )

    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
    # Both may be None for a partially-initialized instance (pickle compat).
    _subtype: None | np.dtype
    _closed: IntervalClosedType | None

    def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
        """
        Parameters
        ----------
        subtype : IntervalDtype, str, np.dtype or None
            The dtype of the Interval bounds; None leaves the instance
            partially initialized (used for pickle compat).
        closed : {'right', 'left', 'both', 'neither'}, optional
            Which side(s) of the interval are closed.

        Raises
        ------
        ValueError
            If ``closed`` is invalid or conflicts with ``subtype``.
        TypeError
            If ``subtype`` cannot be converted to a supported dtype.
        """
        from pandas.core.dtypes.common import (
            is_string_dtype,
            pandas_dtype,
        )

        if closed is not None and closed not in {"right", "left", "both", "neither"}:
            raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")

        if isinstance(subtype, IntervalDtype):
            # Copy-construct; an explicit 'closed' must agree with the source.
            if closed is not None and closed != subtype.closed:
                raise ValueError(
                    "dtype.closed and 'closed' do not match. "
                    "Try IntervalDtype(dtype.subtype, closed) instead."
                )
            self._subtype = subtype._subtype
            self._closed = subtype._closed
        elif subtype is None:
            # we are called as an empty constructor
            # generally for pickle compat
            self._subtype = None
            self._closed = closed
        elif isinstance(subtype, str) and subtype.lower() == "interval":
            # Bare "interval" string -> no subtype specified.
            self._subtype = None
            self._closed = closed
        else:
            if isinstance(subtype, str):
                # Parse "interval[subtype, closed]" strings; a 'closed'
                # embedded in the string must agree with the keyword.
                m = IntervalDtype._match.search(subtype)
                if m is not None:
                    gd = m.groupdict()
                    subtype = gd["subtype"]
                    if gd.get("closed", None) is not None:
                        if closed is not None:
                            if closed != gd["closed"]:
                                raise ValueError(
                                    "'closed' keyword does not match value "
                                    "specified in dtype string"
                                )
                        closed = gd["closed"]  # type: ignore[assignment]

            try:
                subtype = pandas_dtype(subtype)
            except TypeError as err:
                raise TypeError("could not construct IntervalDtype") from err
            if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
                # GH 19016
                msg = (
                    "category, object, and string subtypes are not supported "
                    "for IntervalDtype"
                )
                raise TypeError(msg)
            self._subtype = subtype
            self._closed = closed

    @cache_readonly
    def _can_hold_na(self) -> bool:
        subtype = self._subtype
        if subtype is None:
            # partially-initialized
            raise NotImplementedError(
                "_can_hold_na is not defined for partially-initialized IntervalDtype"
            )
        # Integer bounds cannot represent NaN/NaT.
        if subtype.kind in "iu":
            return False
        return True

    @property
    def closed(self) -> IntervalClosedType:
        return self._closed  # type: ignore[return-value]

    @property
    def subtype(self):
        """
        The dtype of the Interval bounds.

        Examples
        --------
        >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
        >>> dtype.subtype
        dtype('int64')
        """
        return self._subtype

    @classmethod
    def construct_array_type(cls) -> type[IntervalArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import IntervalArray

        return IntervalArray

    @classmethod
    def construct_from_string(cls, string: str_type) -> IntervalDtype:
        """
        attempt to construct this type from a string, raise a TypeError
        if its not possible
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )

        if string.lower() == "interval" or cls._match.search(string) is not None:
            return cls(string)

        msg = (
            f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n"
            "Incorrectly formatted string passed to constructor. "
            "Valid formats include Interval or Interval[dtype] "
            "where dtype is numeric, datetime, or timedelta"
        )
        raise TypeError(msg)

    @property
    def type(self) -> type[Interval]:
        return Interval

    def __str__(self) -> str_type:
        if self.subtype is None:
            return "interval"
        if self.closed is None:
            # Only partially initialized GH#38394
            return f"interval[{self.subtype}]"
        return f"interval[{self.subtype}, {self.closed}]"

    def __hash__(self) -> int:
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            return other.lower() in (self.name.lower(), str(self).lower())
        elif not isinstance(other, IntervalDtype):
            return False
        elif self.subtype is None or other.subtype is None:
            # None should match any subtype
            return True
        elif self.closed != other.closed:
            return False
        else:
            return self.subtype == other.subtype

    def __setstate__(self, state) -> None:
        # for pickle compat. __get_state__ is defined in the
        # PandasExtensionDtype superclass and uses the public properties to
        # pickle -> need to set the settable private ones here (see GH26067)
        self._subtype = state["subtype"]

        # backward-compat older pickles won't have "closed" key
        self._closed = state.pop("closed", None)

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Return a boolean if we if the passed type is an actual dtype that we
        can match (via string or type)
        """
        if isinstance(dtype, str):
            if dtype.lower().startswith("interval"):
                try:
                    return cls.construct_from_string(dtype) is not None
                except (ValueError, TypeError):
                    return False
            else:
                return False
        return super().is_dtype(dtype)

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
        """
        Construct IntervalArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays import IntervalArray

        # Normalize to a list of chunks so both Array and ChunkedArray
        # take the same code path.
        if isinstance(array, pyarrow.Array):
            chunks = [array]
        else:
            chunks = array.chunks

        results = []
        for arr in chunks:
            if isinstance(arr, pyarrow.ExtensionArray):
                arr = arr.storage
            left = np.asarray(arr.field("left"), dtype=self.subtype)
            right = np.asarray(arr.field("right"), dtype=self.subtype)
            iarr = IntervalArray.from_arrays(left, right, closed=self.closed)
            results.append(iarr)

        if not results:
            return IntervalArray.from_arrays(
                np.array([], dtype=self.subtype),
                np.array([], dtype=self.subtype),
                closed=self.closed,
            )
        return IntervalArray._concat_same_type(results)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # Only combine when every dtype is an IntervalDtype.
        if not all(isinstance(x, IntervalDtype) for x in dtypes):
            return None

        # Mismatched 'closed' cannot be represented -> fall back to object.
        closed = cast("IntervalDtype", dtypes[0]).closed
        if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
            return np.dtype(object)

        from pandas.core.dtypes.cast import find_common_type

        common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
        if common == object:
            return np.dtype(object)
        return IntervalDtype(common, closed=closed)

    @cache_readonly
    def index_class(self) -> type_t[IntervalIndex]:
        # Index subclass used for data of this dtype; computed once and cached.
        from pandas import IntervalIndex

        return IntervalIndex
1431
+
1432
+
1433
class NumpyEADtype(ExtensionDtype):
    """
    A Pandas ExtensionDtype for NumPy dtypes.

    This is mostly for internal compatibility, and is not especially
    useful on its own.

    Parameters
    ----------
    dtype : object
        Object to be converted to a NumPy data type object.

    See Also
    --------
    numpy.dtype
    """

    _metadata = ("_dtype",)
    _supports_2d = False
    _can_fast_transpose = False

    def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None:
        if isinstance(dtype, NumpyEADtype):
            # make constructor idempotent
            dtype = dtype.numpy_dtype
        self._dtype = np.dtype(dtype)

    def __repr__(self) -> str:
        return f"NumpyEADtype({repr(self.name)})"

    @property
    def numpy_dtype(self) -> np.dtype:
        """
        The NumPy dtype this NumpyEADtype wraps.
        """
        return self._dtype

    @property
    def name(self) -> str:
        """
        A bit-width name for this data-type.
        """
        return self._dtype.name

    @property
    def type(self) -> type[np.generic]:
        """
        The type object used to instantiate a scalar of this NumPy data-type.
        """
        return self._dtype.type

    @property
    def _is_numeric(self) -> bool:
        # exclude object, str, unicode, void.
        return self.kind in set("biufc")

    @property
    def _is_boolean(self) -> bool:
        return self.kind == "b"

    @classmethod
    def construct_from_string(cls, string: str) -> NumpyEADtype:
        """
        Construct a NumpyEADtype from any string numpy accepts.

        Raises
        ------
        TypeError
            If ``string`` is not a str or is not a valid numpy dtype string.
        """
        try:
            dtype = np.dtype(string)
        except TypeError as err:
            if not isinstance(string, str):
                msg = f"'construct_from_string' expects a string, got {type(string)}"
            else:
                msg = f"Cannot construct a 'NumpyEADtype' from '{string}'"
            raise TypeError(msg) from err
        return cls(dtype)

    @classmethod
    def construct_array_type(cls) -> type_t[NumpyExtensionArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import NumpyExtensionArray

        return NumpyExtensionArray

    @property
    def kind(self) -> str:
        """
        A character code (one of 'biufcmMOSUV') identifying the general kind of data.
        """
        return self._dtype.kind

    @property
    def itemsize(self) -> int:
        """
        The element size of this data-type object.
        """
        return self._dtype.itemsize
1531
+
1532
+
1533
class BaseMaskedDtype(ExtensionDtype):
    """
    Base class for dtypes for BaseMaskedArray subclasses.
    """

    # Masked dtypes have no numpy "base" dtype; subclasses set ``type``
    # to the numpy scalar type they wrap.
    base = None
    type: type

    @property
    def na_value(self) -> libmissing.NAType:
        # All masked dtypes share pd.NA as their missing-value sentinel.
        return libmissing.NA

    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """Return an instance of our numpy dtype"""
        return np.dtype(self.type)

    @cache_readonly
    def kind(self) -> str:
        # numpy kind character of the underlying dtype (e.g. 'i', 'u', 'f', 'b').
        return self.numpy_dtype.kind

    @cache_readonly
    def itemsize(self) -> int:
        """Return the number of bytes in this dtype"""
        return self.numpy_dtype.itemsize

    @classmethod
    def construct_array_type(cls) -> type_t[BaseMaskedArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        # Abstract: each concrete masked dtype supplies its array type.
        raise NotImplementedError

    @classmethod
    def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype:
        """
        Construct the MaskedDtype corresponding to the given numpy dtype.

        Raises
        ------
        NotImplementedError
            For numpy dtypes with no masked counterpart (anything other
            than bool, signed/unsigned integer, or float).
        """
        if dtype.kind == "b":
            from pandas.core.arrays.boolean import BooleanDtype

            return BooleanDtype()
        elif dtype.kind in "iu":
            from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE

            return NUMPY_INT_TO_DTYPE[dtype]
        elif dtype.kind == "f":
            from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE

            return NUMPY_FLOAT_TO_DTYPE[dtype]
        else:
            raise NotImplementedError(dtype)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # We unwrap any masked dtypes, find the common dtype we would use
        # for that, then re-mask the result.
        from pandas.core.dtypes.cast import find_common_type

        new_dtype = find_common_type(
            [
                dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype
                for dtype in dtypes
            ]
        )
        if not isinstance(new_dtype, np.dtype):
            # If we ever support e.g. Masked[DatetimeArray] then this will change
            return None
        try:
            return type(self).from_numpy_dtype(new_dtype)
        except (KeyError, NotImplementedError):
            return None
1608
+
1609
+
1610
+ @register_extension_dtype
1611
+ class SparseDtype(ExtensionDtype):
1612
+ """
1613
+ Dtype for data stored in :class:`SparseArray`.
1614
+
1615
+ This dtype implements the pandas ExtensionDtype interface.
1616
+
1617
+ Parameters
1618
+ ----------
1619
+ dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
1620
+ The dtype of the underlying array storing the non-fill value values.
1621
+ fill_value : scalar, optional
1622
+ The scalar value not stored in the SparseArray. By default, this
1623
+ depends on `dtype`.
1624
+
1625
+ =========== ==========
1626
+ dtype na_value
1627
+ =========== ==========
1628
+ float ``np.nan``
1629
+ int ``0``
1630
+ bool ``False``
1631
+ datetime64 ``pd.NaT``
1632
+ timedelta64 ``pd.NaT``
1633
+ =========== ==========
1634
+
1635
+ The default value may be overridden by specifying a `fill_value`.
1636
+
1637
+ Attributes
1638
+ ----------
1639
+ None
1640
+
1641
+ Methods
1642
+ -------
1643
+ None
1644
+
1645
+ Examples
1646
+ --------
1647
+ >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))
1648
+ >>> ser
1649
+ 0 1
1650
+ 1 0
1651
+ 2 0
1652
+ dtype: Sparse[int64, 0]
1653
+ >>> ser.sparse.density
1654
+ 0.3333333333333333
1655
+ """
1656
+
1657
+ _is_immutable = True
1658
+
1659
+ # We include `_is_na_fill_value` in the metadata to avoid hash collisions
1660
+ # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
1661
+ # Without is_na_fill_value in the comparison, those would be equal since
1662
+ # hash(nan) is (sometimes?) 0.
1663
+ _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
1664
+
1665
    def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
        """
        Parameters
        ----------
        dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
            Dtype of the non-fill values; string dtypes collapse to object.
        fill_value : scalar, optional
            Defaults to the NA value for ``dtype`` when not given.

        Raises
        ------
        TypeError
            If the resolved subtype is not a numpy dtype (GH#53160).
        """
        # Copy-construct from another SparseDtype, keeping its fill_value
        # unless one was explicitly provided.
        if isinstance(dtype, type(self)):
            if fill_value is None:
                fill_value = dtype.fill_value
            dtype = dtype.subtype

        from pandas.core.dtypes.common import (
            is_string_dtype,
            pandas_dtype,
        )
        from pandas.core.dtypes.missing import na_value_for_dtype

        dtype = pandas_dtype(dtype)
        if is_string_dtype(dtype):
            dtype = np.dtype("object")
        if not isinstance(dtype, np.dtype):
            # GH#53160
            raise TypeError("SparseDtype subtype must be a numpy dtype")

        if fill_value is None:
            fill_value = na_value_for_dtype(dtype)

        self._dtype = dtype
        self._fill_value = fill_value
        # Warns (FutureWarning) when fill_value is incompatible with dtype.
        self._check_fill_value()
1690
+
1691
    def __hash__(self) -> int:
        # Python3 doesn't inherit __hash__ when a base class overrides
        # __eq__, so we explicitly do it here.
        return super().__hash__()
1695
+
1696
    def __eq__(self, other: object) -> bool:
        # We have to override __eq__ to handle NA values in _metadata.
        # The base class does simple == checks, which fail for NA.
        if isinstance(other, str):
            try:
                other = self.construct_from_string(other)
            except TypeError:
                return False

        if isinstance(other, type(self)):
            subtype = self.subtype == other.subtype
            if self._is_na_fill_value:
                # this case is complicated by two things:
                # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
                # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
                # i.e. we want to treat any floating-point NaN as equal, but
                # not a floating-point NaN and a datetime NaT.
                # NOTE(review): `and` binds tighter than `or`, so this groups
                # as (other._is_na_fill_value and isinstance(self.., ..))
                # or isinstance(other.., ..) — confirm that grouping is
                # intended rather than (A and (B or C)).
                fill_value = (
                    other._is_na_fill_value
                    and isinstance(self.fill_value, type(other.fill_value))
                    or isinstance(other.fill_value, type(self.fill_value))
                )
            else:
                with warnings.catch_warnings():
                    # Ignore spurious numpy warning
                    warnings.filterwarnings(
                        "ignore",
                        "elementwise comparison failed",
                        category=DeprecationWarning,
                    )

                    fill_value = self.fill_value == other.fill_value

            return subtype and fill_value
        return False
1731
+
1732
    @property
    def fill_value(self):
        """
        The fill value of the array.

        Converting the SparseArray to a dense ndarray will fill the
        array with this value.

        .. warning::

           It's possible to end up with a SparseArray that has ``fill_value``
           values in ``sp_values``. This can occur, for example, when setting
           ``SparseArray.fill_value`` directly.
        """
        return self._fill_value
1747
+
1748
    def _check_fill_value(self) -> None:
        """
        Validate ``_fill_value`` against ``subtype``.

        Raises
        ------
        ValueError
            If the fill value is not a scalar.

        Warns with FutureWarning (does not raise) when the fill value is
        not a valid value for the subtype — currently deprecated behavior.
        """
        if not lib.is_scalar(self._fill_value):
            raise ValueError(
                f"fill_value must be a scalar. Got {self._fill_value} instead"
            )

        from pandas.core.dtypes.cast import can_hold_element
        from pandas.core.dtypes.missing import (
            is_valid_na_for_dtype,
            isna,
        )

        from pandas.core.construction import ensure_wrapped_if_datetimelike

        # GH#23124 require fill_value and subtype to match
        val = self._fill_value
        if isna(val):
            # NA fill values must be the right *kind* of NA for the subtype
            # (e.g. NaT for datetime64, not np.nan).
            if not is_valid_na_for_dtype(val, self.subtype):
                warnings.warn(
                    "Allowing arbitrary scalar fill_value in SparseDtype is "
                    "deprecated. In a future version, the fill_value must be "
                    "a valid value for the SparseDtype.subtype.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
        else:
            # Non-NA fill values must be storable in an array of the subtype;
            # probe with an empty dummy array.
            dummy = np.empty(0, dtype=self.subtype)
            dummy = ensure_wrapped_if_datetimelike(dummy)

            if not can_hold_element(dummy, val):
                warnings.warn(
                    "Allowing arbitrary scalar fill_value in SparseDtype is "
                    "deprecated. In a future version, the fill_value must be "
                    "a valid value for the SparseDtype.subtype.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
1785
+
1786
    @property
    def _is_na_fill_value(self) -> bool:
        # True when the fill value is any recognized NA (nan, NaT, pd.NA, ...).
        from pandas import isna

        return isna(self.fill_value)
1791
+
1792
    @property
    def _is_numeric(self) -> bool:
        # Everything except an object subtype is treated as numeric here.
        return not self.subtype == object
1795
+
1796
    @property
    def _is_boolean(self) -> bool:
        return self.subtype.kind == "b"
1799
+
1800
    @property
    def kind(self) -> str:
        """
        The numpy kind character of the subtype (e.g. 'i', 'f', 'O').

        NOTE(review): a previous docstring described this as the sparse
        index kind ('integer'/'block'), but the code returns the numpy
        dtype kind of ``subtype`` — the docstring above reflects the code.
        """
        return self.subtype.kind
1806
+
1807
    @property
    def type(self):
        # Scalar type of the subtype (e.g. np.int64).
        return self.subtype.type
1810
+
1811
    @property
    def subtype(self):
        # The numpy dtype of the non-fill values.
        return self._dtype
1814
+
1815
    @property
    def name(self) -> str:
        # e.g. "Sparse[float64, nan]"
        return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"
1818
+
1819
    def __repr__(self) -> str:
        return self.name
1821
+
1822
    @classmethod
    def construct_array_type(cls) -> type_t[SparseArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        # Lazy import — presumably to avoid a circular import at module
        # load time.
        from pandas.core.arrays.sparse.array import SparseArray

        return SparseArray
1834
+
1835
+ @classmethod
1836
+ def construct_from_string(cls, string: str) -> SparseDtype:
1837
+ """
1838
+ Construct a SparseDtype from a string form.
1839
+
1840
+ Parameters
1841
+ ----------
1842
+ string : str
1843
+ Can take the following forms.
1844
+
1845
+ string dtype
1846
+ ================ ============================
1847
+ 'int' SparseDtype[np.int64, 0]
1848
+ 'Sparse' SparseDtype[np.float64, nan]
1849
+ 'Sparse[int]' SparseDtype[np.int64, 0]
1850
+ 'Sparse[int, 0]' SparseDtype[np.int64, 0]
1851
+ ================ ============================
1852
+
1853
+ It is not possible to specify non-default fill values
1854
+ with a string. An argument like ``'Sparse[int, 1]'``
1855
+ will raise a ``TypeError`` because the default fill value
1856
+ for integers is 0.
1857
+
1858
+ Returns
1859
+ -------
1860
+ SparseDtype
1861
+ """
1862
+ if not isinstance(string, str):
1863
+ raise TypeError(
1864
+ f"'construct_from_string' expects a string, got {type(string)}"
1865
+ )
1866
+ msg = f"Cannot construct a 'SparseDtype' from '{string}'"
1867
+ if string.startswith("Sparse"):
1868
+ try:
1869
+ sub_type, has_fill_value = cls._parse_subtype(string)
1870
+ except ValueError as err:
1871
+ raise TypeError(msg) from err
1872
+ else:
1873
+ result = SparseDtype(sub_type)
1874
+ msg = (
1875
+ f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
1876
+ "looks like the fill_value in the string is not "
1877
+ "the default for the dtype. Non-default fill_values "
1878
+ "are not supported. Use the 'SparseDtype()' "
1879
+ "constructor instead."
1880
+ )
1881
+ if has_fill_value and str(result) != string:
1882
+ raise TypeError(msg)
1883
+ return result
1884
+ else:
1885
+ raise TypeError(msg)
1886
+
1887
@staticmethod
def _parse_subtype(dtype: str) -> tuple[str, bool]:
    """
    Split a sparse dtype string into its subtype and fill-value flag.

    Parameters
    ----------
    dtype : str
        A string of the form ``Sparse``, ``Sparse[subtype]`` or
        ``Sparse[subtype, fill_value]``.

    Returns
    -------
    tuple[str, bool]
        The subtype name and whether a fill value was present in the
        string.

    Raises
    ------
    ValueError
        When the string does not match any of the accepted forms.
    """
    pattern = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
    match = pattern.match(dtype)
    if match is not None:
        groups = match.groupdict()
        return groups["subtype"], bool(groups["fill_value"])
    if dtype == "Sparse":
        # Bare "Sparse" defaults to a float64 subtype with no fill value.
        return "float64", False
    raise ValueError(f"Cannot parse {dtype}")
1920
+
1921
@classmethod
def is_dtype(cls, dtype: object) -> bool:
    """
    Return whether ``dtype`` matches this (sparse) dtype.

    Accepts SparseDtype instances, "Sparse"/"Sparse[...]" strings, and
    objects exposing a ``.dtype`` attribute.
    """
    # Arrays/Series/Index: inspect their .dtype attribute instead.
    dtype = getattr(dtype, "dtype", dtype)
    if isinstance(dtype, str) and dtype.startswith("Sparse"):
        # Normalize a "Sparse[...]" string down to its numpy subtype so the
        # final isinstance check below accepts it.
        sub_type, _ = cls._parse_subtype(dtype)
        dtype = np.dtype(sub_type)
    elif isinstance(dtype, cls):
        return True
    # NOTE(review): a plain np.dtype also passes this final check — this
    # mirrors upstream pandas behavior; confirm before tightening.
    return isinstance(dtype, np.dtype) or dtype == "Sparse"
1930
+
1931
def update_dtype(self, dtype) -> SparseDtype:
    """
    Convert the SparseDtype to a new dtype.

    This takes care of converting the ``fill_value``.

    Parameters
    ----------
    dtype : Union[str, numpy.dtype, SparseDtype]
        The new dtype to use.

        * For a SparseDtype, it is simply returned
        * For a NumPy dtype (or str), the current fill value
          is converted to the new dtype, and a SparseDtype
          with `dtype` and the new fill value is returned.

    Returns
    -------
    SparseDtype
        A new SparseDtype with the correct `dtype` and fill value
        for that `dtype`.

    Raises
    ------
    ValueError
        When the current fill value cannot be converted to the
        new `dtype` (e.g. trying to convert ``np.nan`` to an
        integer dtype).


    Examples
    --------
    >>> SparseDtype(int, 0).update_dtype(float)
    Sparse[float64, 0.0]

    >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
    Sparse[float64, nan]
    """
    # Imported locally to avoid circular imports at module load time.
    from pandas.core.dtypes.astype import astype_array
    from pandas.core.dtypes.common import pandas_dtype

    cls = type(self)
    dtype = pandas_dtype(dtype)

    if not isinstance(dtype, cls):
        if not isinstance(dtype, np.dtype):
            raise TypeError("sparse arrays of extension dtypes not supported")

        # Cast the current fill_value through a 1-element array so the
        # conversion follows array-casting rules rather than scalar ones.
        fv_asarray = np.atleast_1d(np.array(self.fill_value))
        fvarr = astype_array(fv_asarray, dtype)
        # NB: not fv_0d.item(), as that casts dt64->int
        fill_value = fvarr[0]
        dtype = cls(dtype, fill_value=fill_value)

    return dtype
1986
+
1987
@property
def _subtype_with_str(self):
    """
    The subtype to use when converting values, accounting for strings.

    Pandas stores string data in object-dtype arrays, so ``self.subtype``
    is ``dtype('O')`` even when the fill value is a ``str``.  For casts
    (e.g. ``.astype``) the concrete string type is needed instead, so a
    string fill value takes precedence here.

    Returns
    -------
    >>> SparseDtype(int, 1)._subtype_with_str
    dtype('int64')

    >>> SparseDtype(object, 1)._subtype_with_str
    dtype('O')

    >>> dtype = SparseDtype(str, '')
    >>> dtype.subtype
    dtype('O')

    >>> dtype._subtype_with_str
    <class 'str'>
    """
    fv = self.fill_value
    if isinstance(fv, str):
        return type(fv)
    return self.subtype
2014
+
2015
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
    """
    Return the common SparseDtype for ``dtypes``, or None when any
    non-sparse extension dtype is present (deferring to that dtype).
    """
    # TODO for now only handle SparseDtypes and numpy dtypes => extend
    # with other compatible extension dtypes
    from pandas.core.dtypes.cast import np_find_common_type

    if any(
        isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
        for x in dtypes
    ):
        return None

    # The first sparse fill_value wins; differing values only warn.
    fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
    fill_value = fill_values[0]

    from pandas import isna

    # np.nan isn't a singleton, so we may end up with multiple
    # NaNs here, so we ignore the all NA case too.
    if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
        warnings.warn(
            "Concatenating sparse arrays with multiple fill "
            f"values: '{fill_values}'. Picking the first and "
            "converting the rest.",
            PerformanceWarning,
            stacklevel=find_stack_level(),
        )

    # Find the common numpy subtype and rebuild a SparseDtype around it.
    np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes)
    return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)
2044
+
2045
+
2046
@register_extension_dtype
class ArrowDtype(StorageExtensionDtype):
    """
    An ExtensionDtype for PyArrow data types.

    .. warning::

       ArrowDtype is considered experimental. The implementation and
       parts of the API may change without warning.

    While most ``dtype`` arguments can accept the "string"
    constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful
    if the data type contains parameters like ``pyarrow.timestamp``.

    Parameters
    ----------
    pyarrow_dtype : pa.DataType
        An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__.

    Attributes
    ----------
    pyarrow_dtype

    Methods
    -------
    None

    Returns
    -------
    ArrowDtype

    Examples
    --------
    >>> import pyarrow as pa
    >>> pd.ArrowDtype(pa.int64())
    int64[pyarrow]

    Types with parameters must be constructed with ArrowDtype.

    >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
    timestamp[s, tz=America/New_York][pyarrow]
    >>> pd.ArrowDtype(pa.list_(pa.int64()))
    list<item: int64>[pyarrow]
    """

    # Include the wrapped pyarrow type (not just the storage string) in the
    # metadata used by the base class for equality/pickling.
    _metadata = ("storage", "pyarrow_dtype")  # type: ignore[assignment]

    def __init__(self, pyarrow_dtype: pa.DataType) -> None:
        """
        Validate and store the wrapped pyarrow DataType.

        Raises ``ImportError`` for pyarrow < 10.0.1 and ``ValueError``
        when ``pyarrow_dtype`` is not a ``pa.DataType`` instance.
        """
        super().__init__("pyarrow")
        if pa_version_under10p1:
            raise ImportError("pyarrow>=10.0.1 is required for ArrowDtype")
        if not isinstance(pyarrow_dtype, pa.DataType):
            raise ValueError(
                f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
                f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead."
            )
        self.pyarrow_dtype = pyarrow_dtype

    def __repr__(self) -> str:
        return self.name

    def __hash__(self) -> int:
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        # Compare by the wrapped pyarrow type; defer to the base-class
        # comparison (string-based) for non-ArrowDtype operands.
        if not isinstance(other, type(self)):
            return super().__eq__(other)
        return self.pyarrow_dtype == other.pyarrow_dtype

    @property
    def type(self):
        """
        Returns associated scalar type.
        """
        # Map each pyarrow type category onto the Python/pandas scalar
        # type used to represent a single element.
        pa_type = self.pyarrow_dtype
        if pa.types.is_integer(pa_type):
            return int
        elif pa.types.is_floating(pa_type):
            return float
        elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type):
            return str
        elif (
            pa.types.is_binary(pa_type)
            or pa.types.is_fixed_size_binary(pa_type)
            or pa.types.is_large_binary(pa_type)
        ):
            return bytes
        elif pa.types.is_boolean(pa_type):
            return bool
        elif pa.types.is_duration(pa_type):
            # ns-resolution maps onto pandas scalars, other units onto stdlib.
            if pa_type.unit == "ns":
                return Timedelta
            else:
                return timedelta
        elif pa.types.is_timestamp(pa_type):
            if pa_type.unit == "ns":
                return Timestamp
            else:
                return datetime
        elif pa.types.is_date(pa_type):
            return date
        elif pa.types.is_time(pa_type):
            return time
        elif pa.types.is_decimal(pa_type):
            return Decimal
        elif pa.types.is_dictionary(pa_type):
            # TODO: Potentially change this & CategoricalDtype.type to
            # something more representative of the scalar
            return CategoricalDtypeType
        elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):
            return list
        elif pa.types.is_fixed_size_list(pa_type):
            return list
        elif pa.types.is_map(pa_type):
            return list
        elif pa.types.is_struct(pa_type):
            return dict
        elif pa.types.is_null(pa_type):
            # TODO: None? pd.NA? pa.null?
            return type(pa_type)
        elif isinstance(pa_type, pa.ExtensionType):
            # Recurse into the storage type of pyarrow extension types.
            return type(self)(pa_type.storage_type).type
        raise NotImplementedError(pa_type)

    @property
    def name(self) -> str:  # type: ignore[override]
        """
        A string identifying the data type.
        """
        return f"{str(self.pyarrow_dtype)}[{self.storage}]"

    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """Return an instance of the related numpy dtype"""
        if pa.types.is_timestamp(self.pyarrow_dtype):
            # pa.timestamp(unit).to_pandas_dtype() returns ns units
            # regardless of the pyarrow timestamp units.
            # This can be removed if/when pyarrow addresses it:
            # https://github.com/apache/arrow/issues/34462
            return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]")
        if pa.types.is_duration(self.pyarrow_dtype):
            # pa.duration(unit).to_pandas_dtype() returns ns units
            # regardless of the pyarrow duration units
            # This can be removed if/when pyarrow addresses it:
            # https://github.com/apache/arrow/issues/34462
            return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")
        if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(
            self.pyarrow_dtype
        ):
            # pa.string().to_pandas_dtype() = object which we don't want
            return np.dtype(str)
        try:
            return np.dtype(self.pyarrow_dtype.to_pandas_dtype())
        except (NotImplementedError, TypeError):
            # Types without a numpy equivalent fall back to object.
            return np.dtype(object)

    @cache_readonly
    def kind(self) -> str:
        if pa.types.is_timestamp(self.pyarrow_dtype):
            # To mirror DatetimeTZDtype
            return "M"
        return self.numpy_dtype.kind

    @cache_readonly
    def itemsize(self) -> int:
        """Return the number of bytes in this dtype"""
        return self.numpy_dtype.itemsize

    @classmethod
    def construct_array_type(cls) -> type_t[ArrowExtensionArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays.arrow import ArrowExtensionArray

        return ArrowExtensionArray

    @classmethod
    def construct_from_string(cls, string: str) -> ArrowDtype:
        """
        Construct this type from a string.

        Parameters
        ----------
        string : str
            string should follow the format f"{pyarrow_type}[pyarrow]"
            e.g. int64[pyarrow]
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        if not string.endswith("[pyarrow]"):
            raise TypeError(f"'{string}' must end with '[pyarrow]'")
        if string == "string[pyarrow]":
            # Ensure Registry.find skips ArrowDtype to use StringDtype instead
            raise TypeError("string[pyarrow] should be constructed by StringDtype")

        base_type = string[:-9]  # get rid of "[pyarrow]"
        try:
            pa_dtype = pa.type_for_alias(base_type)
        except ValueError as err:
            # type_for_alias only handles parameter-free aliases; detect
            # parameterized strings like "timestamp[s, tz=UTC]".
            has_parameters = re.search(r"[\[\(].*[\]\)]", base_type)
            if has_parameters:
                # Fallback to try common temporal types
                try:
                    return cls._parse_temporal_dtype_string(base_type)
                except (NotImplementedError, ValueError):
                    # Fall through to raise with nice exception message below
                    pass

                raise NotImplementedError(
                    "Passing pyarrow type specific parameters "
                    f"({has_parameters.group()}) in the string is not supported. "
                    "Please construct an ArrowDtype object with a pyarrow_dtype "
                    "instance with specific parameters."
                ) from err
            raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err
        return cls(pa_dtype)

    # TODO(arrow#33642): This can be removed once supported by pyarrow
    @classmethod
    def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype:
        """
        Construct a temporal ArrowDtype from string.
        """
        # we assume
        #  1) "[pyarrow]" has already been stripped from the end of our string.
        #  2) we know "[" is present
        head, tail = string.split("[", 1)

        if not tail.endswith("]"):
            raise ValueError
        tail = tail[:-1]

        if head == "timestamp":
            assert "," in tail  # otherwise type_for_alias should work
            unit, tz = tail.split(",", 1)
            unit = unit.strip()
            tz = tz.strip()
            if tz.startswith("tz="):
                tz = tz[3:]

            pa_type = pa.timestamp(unit, tz=tz)
            dtype = cls(pa_type)
            return dtype

        raise NotImplementedError(string)

    @property
    def _is_numeric(self) -> bool:
        """
        Whether columns with this dtype should be considered numeric.
        """
        # TODO: pa.types.is_boolean?
        return (
            pa.types.is_integer(self.pyarrow_dtype)
            or pa.types.is_floating(self.pyarrow_dtype)
            or pa.types.is_decimal(self.pyarrow_dtype)
        )

    @property
    def _is_boolean(self) -> bool:
        """
        Whether this dtype should be considered boolean.
        """
        return pa.types.is_boolean(self.pyarrow_dtype)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # We unwrap any masked dtypes, find the common dtype we would use
        # for that, then re-mask the result.
        # Mirrors BaseMaskedDtype
        from pandas.core.dtypes.cast import find_common_type

        # Null entries don't constrain the result, so drop them first.
        null_dtype = type(self)(pa.null())

        new_dtype = find_common_type(
            [
                dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype
                for dtype in dtypes
                if dtype != null_dtype
            ]
        )
        if not isinstance(new_dtype, np.dtype):
            return None
        try:
            pa_dtype = pa.from_numpy_dtype(new_dtype)
            return type(self)(pa_dtype)
        except NotImplementedError:
            # No pyarrow equivalent for the common numpy dtype.
            return None

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray):
        """
        Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
        """
        array_class = self.construct_array_type()
        arr = array.cast(self.pyarrow_dtype, safe=True)
        return array_class(arr)
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/inference.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ basic inference routines """
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections import abc
6
+ from numbers import Number
7
+ import re
8
+ from re import Pattern
9
+ from typing import TYPE_CHECKING
10
+
11
+ import numpy as np
12
+
13
+ from pandas._libs import lib
14
+
15
+ if TYPE_CHECKING:
16
+ from collections.abc import Hashable
17
+
18
+ from pandas._typing import TypeGuard
19
+
20
+ is_bool = lib.is_bool
21
+
22
+ is_integer = lib.is_integer
23
+
24
+ is_float = lib.is_float
25
+
26
+ is_complex = lib.is_complex
27
+
28
+ is_scalar = lib.is_scalar
29
+
30
+ is_decimal = lib.is_decimal
31
+
32
+ is_interval = lib.is_interval
33
+
34
+ is_list_like = lib.is_list_like
35
+
36
+ is_iterator = lib.is_iterator
37
+
38
+
39
+ def is_number(obj) -> TypeGuard[Number | np.number]:
40
+ """
41
+ Check if the object is a number.
42
+
43
+ Returns True when the object is a number, and False if is not.
44
+
45
+ Parameters
46
+ ----------
47
+ obj : any type
48
+ The object to check if is a number.
49
+
50
+ Returns
51
+ -------
52
+ bool
53
+ Whether `obj` is a number or not.
54
+
55
+ See Also
56
+ --------
57
+ api.types.is_integer: Checks a subgroup of numbers.
58
+
59
+ Examples
60
+ --------
61
+ >>> from pandas.api.types import is_number
62
+ >>> is_number(1)
63
+ True
64
+ >>> is_number(7.15)
65
+ True
66
+
67
+ Booleans are valid because they are int subclass.
68
+
69
+ >>> is_number(False)
70
+ True
71
+
72
+ >>> is_number("foo")
73
+ False
74
+ >>> is_number("5")
75
+ False
76
+ """
77
+ return isinstance(obj, (Number, np.number))
78
+
79
+
80
def iterable_not_string(obj) -> bool:
    """
    Check if the object is an iterable but not a string.

    Parameters
    ----------
    obj : The object to check.

    Returns
    -------
    is_iter_not_string : bool
        Whether `obj` is a non-string iterable.

    Examples
    --------
    >>> iterable_not_string([1, 2, 3])
    True
    >>> iterable_not_string("foo")
    False
    >>> iterable_not_string(1)
    False
    """
    # Strings are iterable but are excluded on purpose.
    if isinstance(obj, str):
        return False
    return isinstance(obj, abc.Iterable)
103
+
104
+
105
def is_file_like(obj) -> bool:
    """
    Check if the object is a file-like object.

    For objects to be considered file-like, they must
    be an iterator AND have either a `read` and/or `write`
    method as an attribute.

    Note: file-like objects must be iterable, but
    iterable objects need not be file-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` has file-like properties.

    Examples
    --------
    >>> import io
    >>> from pandas.api.types import is_file_like
    >>> buffer = io.StringIO("data")
    >>> is_file_like(buffer)
    True
    >>> is_file_like([1, 2, 3])
    False
    """
    # Needs at least one I/O method plus iteration support.
    has_io_method = hasattr(obj, "read") or hasattr(obj, "write")
    return has_io_method and hasattr(obj, "__iter__")
139
+
140
+
141
+ def is_re(obj) -> TypeGuard[Pattern]:
142
+ """
143
+ Check if the object is a regex pattern instance.
144
+
145
+ Parameters
146
+ ----------
147
+ obj : The object to check
148
+
149
+ Returns
150
+ -------
151
+ bool
152
+ Whether `obj` is a regex pattern.
153
+
154
+ Examples
155
+ --------
156
+ >>> from pandas.api.types import is_re
157
+ >>> import re
158
+ >>> is_re(re.compile(".*"))
159
+ True
160
+ >>> is_re("foo")
161
+ False
162
+ """
163
+ return isinstance(obj, Pattern)
164
+
165
+
166
def is_re_compilable(obj) -> bool:
    """
    Check if the object can be compiled into a regex pattern instance.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` can be compiled as a regex pattern.

    Examples
    --------
    >>> from pandas.api.types import is_re_compilable
    >>> is_re_compilable(".*")
    True
    >>> is_re_compilable(1)
    False
    """
    try:
        re.compile(obj)
    except (TypeError, re.error):
        # TypeError: not a string/pattern at all.
        # re.error: a string, but not a valid pattern (e.g. "(") — the
        # previous code let this propagate, contradicting the function's
        # bool contract ("whether obj CAN be compiled").
        return False
    else:
        return True
193
+
194
+
195
def is_array_like(obj) -> bool:
    """
    Check if the object is array-like.

    For an object to be considered array-like, it must be list-like and
    have a `dtype` attribute.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_array_like : bool
        Whether `obj` has array-like properties.

    Examples
    --------
    >>> is_array_like(np.array([1, 2, 3]))
    True
    >>> is_array_like(pd.Series(["a", "b"]))
    True
    >>> is_array_like(pd.Index(["2016-01-01"]))
    True
    >>> is_array_like([1, 2, 3])
    False
    >>> is_array_like(("a", "b"))
    False
    """
    # Plain sequences fail the dtype check; ndarrays/Series/Index pass.
    if not is_list_like(obj):
        return False
    return hasattr(obj, "dtype")
225
+
226
+
227
def is_nested_list_like(obj) -> bool:
    """
    Check if the object is list-like, and that all of its elements
    are also list-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_list_like : bool
        Whether `obj` has list-like properties.

    Examples
    --------
    >>> is_nested_list_like([[1, 2, 3]])
    True
    >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
    True
    >>> is_nested_list_like(["foo"])
    False
    >>> is_nested_list_like([])
    False
    >>> is_nested_list_like([[1, 2, 3], 1])
    False

    Notes
    -----
    This won't reliably detect whether a consumable iterator (e. g.
    a generator) is a nested-list-like without consuming the iterator.
    To avoid consuming it, we always return False if the outer container
    doesn't define `__len__`.

    See Also
    --------
    is_list_like
    """
    # Sized outer container required so we never consume an iterator.
    if not (is_list_like(obj) and hasattr(obj, "__len__")):
        return False
    if len(obj) == 0:
        return False
    return all(is_list_like(element) for element in obj)
271
+
272
+
273
def is_dict_like(obj) -> bool:
    """
    Check if the object is dict-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` has dict-like properties.

    Examples
    --------
    >>> from pandas.api.types import is_dict_like
    >>> is_dict_like({1: 2})
    True
    >>> is_dict_like([1, 2, 3])
    False
    >>> is_dict_like(dict)
    False
    >>> is_dict_like(dict())
    True
    """
    # [GH 25196] classes themselves (e.g. ``dict``) are not dict-like
    if isinstance(obj, type):
        return False
    return all(
        hasattr(obj, attr) for attr in ("__getitem__", "keys", "__contains__")
    )
304
+
305
+
306
def is_named_tuple(obj) -> bool:
    """
    Check if the object is a named tuple.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` is a named tuple.

    Examples
    --------
    >>> from collections import namedtuple
    >>> from pandas.api.types import is_named_tuple
    >>> Point = namedtuple("Point", ["x", "y"])
    >>> p = Point(1, 2)
    >>>
    >>> is_named_tuple(p)
    True
    >>> is_named_tuple((1, 2))
    False
    """
    # namedtuple instances are the only common Sequences carrying _fields.
    return hasattr(obj, "_fields") and isinstance(obj, abc.Sequence)
332
+
333
+
334
+ def is_hashable(obj) -> TypeGuard[Hashable]:
335
+ """
336
+ Return True if hash(obj) will succeed, False otherwise.
337
+
338
+ Some types will pass a test against collections.abc.Hashable but fail when
339
+ they are actually hashed with hash().
340
+
341
+ Distinguish between these and other types by trying the call to hash() and
342
+ seeing if they raise TypeError.
343
+
344
+ Returns
345
+ -------
346
+ bool
347
+
348
+ Examples
349
+ --------
350
+ >>> import collections
351
+ >>> from pandas.api.types import is_hashable
352
+ >>> a = ([],)
353
+ >>> isinstance(a, collections.abc.Hashable)
354
+ True
355
+ >>> is_hashable(a)
356
+ False
357
+ """
358
+ # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),
359
+ # which can be faster than calling hash. That is because numpy scalars
360
+ # fail this test.
361
+
362
+ # Reconsider this decision once this numpy bug is fixed:
363
+ # https://github.com/numpy/numpy/issues/5562
364
+
365
+ try:
366
+ hash(obj)
367
+ except TypeError:
368
+ return False
369
+ else:
370
+ return True
371
+
372
+
373
def is_sequence(obj) -> bool:
    """
    Check if the object is a sequence of objects.
    String types are not included as sequences here.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_sequence : bool
        Whether `obj` is a sequence of objects.

    Examples
    --------
    >>> l = [1, 2, 3]
    >>>
    >>> is_sequence(l)
    True
    >>> is_sequence(iter(l))
    False
    """
    # Must be both iterable and sized; iterators have no __len__ and are
    # rejected via the TypeError path.
    try:
        iter(obj)
        len(obj)
    except (TypeError, AttributeError):
        return False
    return not isinstance(obj, (str, bytes))
402
+
403
+
404
def is_dataclass(item) -> bool:
    """
    Checks if the object is a data-class instance

    Parameters
    ----------
    item : object

    Returns
    --------
    is_dataclass : bool
        True if the item is an instance of a data-class,
        will return false if you pass the data class itself

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int

    >>> is_dataclass(Point)
    False
    >>> is_dataclass(Point(0,2))
    True

    """
    try:
        from dataclasses import is_dataclass as _stdlib_is_dataclass
    except ImportError:
        # dataclasses unavailable on this interpreter: nothing qualifies.
        return False
    # Exclude the dataclass *type* itself — only instances count.
    return _stdlib_is_dataclass(item) and not isinstance(item, type)
vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/missing.py ADDED
@@ -0,0 +1,810 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ missing types & inference
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from decimal import Decimal
7
+ from functools import partial
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ overload,
11
+ )
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from pandas._config import get_option
17
+
18
+ from pandas._libs import lib
19
+ import pandas._libs.missing as libmissing
20
+ from pandas._libs.tslibs import (
21
+ NaT,
22
+ iNaT,
23
+ )
24
+
25
+ from pandas.core.dtypes.common import (
26
+ DT64NS_DTYPE,
27
+ TD64NS_DTYPE,
28
+ ensure_object,
29
+ is_scalar,
30
+ is_string_or_object_np_dtype,
31
+ )
32
+ from pandas.core.dtypes.dtypes import (
33
+ CategoricalDtype,
34
+ DatetimeTZDtype,
35
+ ExtensionDtype,
36
+ IntervalDtype,
37
+ PeriodDtype,
38
+ )
39
+ from pandas.core.dtypes.generic import (
40
+ ABCDataFrame,
41
+ ABCExtensionArray,
42
+ ABCIndex,
43
+ ABCMultiIndex,
44
+ ABCSeries,
45
+ )
46
+ from pandas.core.dtypes.inference import is_list_like
47
+
48
+ if TYPE_CHECKING:
49
+ from re import Pattern
50
+
51
+ from pandas._typing import (
52
+ ArrayLike,
53
+ DtypeObj,
54
+ NDFrame,
55
+ NDFrameT,
56
+ Scalar,
57
+ npt,
58
+ )
59
+
60
+ from pandas import Series
61
+ from pandas.core.indexes.base import Index
62
+
63
+
64
+ isposinf_scalar = libmissing.isposinf_scalar
65
+ isneginf_scalar = libmissing.isneginf_scalar
66
+
67
+ nan_checker = np.isnan
68
+ INF_AS_NA = False
69
+ _dtype_object = np.dtype("object")
70
+ _dtype_str = np.dtype(str)
71
+
72
+
73
@overload
def isna(obj: Scalar | Pattern) -> bool:
    ...


@overload
def isna(
    obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
    ...


@overload
def isna(obj: NDFrameT) -> NDFrameT:
    ...


# handle unions
@overload
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
    ...


@overload
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    ...


def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    """
    Detect missing values for an array-like object.

    This function takes a scalar or array-like object and indicates
    whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
    in object arrays, ``NaT`` in datetimelike).

    Parameters
    ----------
    obj : scalar or array-like
        Object to check for null or missing values.

    Returns
    -------
    bool or array-like of bool
        For scalar input, returns a scalar boolean.
        For array input, returns an array of boolean indicating whether each
        corresponding element is missing.

    See Also
    --------
    notna : Boolean inverse of pandas.isna.
    Series.isna : Detect missing values in a Series.
    DataFrame.isna : Detect missing values in a DataFrame.
    Index.isna : Detect missing values in an Index.

    Examples
    --------
    Scalar arguments (including strings) result in a scalar boolean.

    >>> pd.isna('dog')
    False

    >>> pd.isna(pd.NA)
    True

    >>> pd.isna(np.nan)
    True

    ndarrays result in an ndarray of booleans.

    >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
    >>> array
    array([[ 1., nan,  3.],
           [ 4.,  5., nan]])
    >>> pd.isna(array)
    array([[False,  True, False],
           [False, False,  True]])

    For indexes, an ndarray of booleans is returned.

    >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
    ...                           "2017-07-08"])
    >>> index
    DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
                  dtype='datetime64[ns]', freq=None)
    >>> pd.isna(index)
    array([False, False,  True, False])

    For Series and DataFrame, the same type is returned, containing booleans.

    >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
    >>> df
         0     1    2
    0  ant   bee  cat
    1  dog  None  fly
    >>> pd.isna(df)
           0      1      2
    0  False  False  False
    1  False   True  False

    >>> pd.isna(df[1])
    0    False
    1     True
    Name: 1, dtype: bool
    """
    # All dispatch logic lives in _isna; the overloads above exist only to
    # give precise return types per input kind.
    return _isna(obj)


# Alias kept for backwards compatibility with the historical name.
isnull = isna
+
183
+
184
def _isna(obj, inf_as_na: bool = False):
    """
    Detect missing values, treating None, NaN or NA as null. Infinite
    values will also be treated as null if inf_as_na is True.

    Parameters
    ----------
    obj: ndarray or object value
        Input array or scalar value.
    inf_as_na: bool
        Whether to treat infinity as null.

    Returns
    -------
    boolean ndarray or boolean
    """
    if is_scalar(obj):
        # Scalar path: delegate to the cython null check.
        return libmissing.checknull(obj, inf_as_na=inf_as_na)
    elif isinstance(obj, ABCMultiIndex):
        raise NotImplementedError("isna is not defined for MultiIndex")
    elif isinstance(obj, type):
        # Classes themselves are never considered missing.
        return False
    elif isinstance(obj, (np.ndarray, ABCExtensionArray)):
        return _isna_array(obj, inf_as_na=inf_as_na)
    elif isinstance(obj, ABCIndex):
        # Try to use cached isna, which also short-circuits for integer dtypes
        # and avoids materializing RangeIndex._values
        if not obj._can_hold_na:
            return obj.isna()
        return _isna_array(obj._values, inf_as_na=inf_as_na)

    elif isinstance(obj, ABCSeries):
        result = _isna_array(obj._values, inf_as_na=inf_as_na)
        # box
        result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
        return result
    elif isinstance(obj, ABCDataFrame):
        return obj.isna()
    elif isinstance(obj, list):
        # Lists are coerced to object ndarray before the array check.
        return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
    elif hasattr(obj, "__array__"):
        # Anything array-convertible goes through the ndarray path.
        return _isna_array(np.asarray(obj), inf_as_na=inf_as_na)
    else:
        # Non-array, non-scalar objects are never missing.
        return False
228
+
229
+
230
def _use_inf_as_na(key) -> None:
    """
    Option change callback for na/inf behaviour.

    Choose which replacement for numpy.isnan / -numpy.isfinite is used.

    Parameters
    ----------
    key : str
        Option key whose current boolean value is read via ``get_option``:
        True means treat None, NaN, INF, -INF as null (old way),
        False means None and NaN are null, but INF, -INF are not null
        (new way).

    Notes
    -----
    This approach to setting global module values is discussed and
    approved here:

    * https://stackoverflow.com/questions/4859217/
      programmatically-creating-variables-in-python/4859312#4859312
    """
    inf_as_na = get_option(key)
    # Rebind the module-level ``_isna`` so every caller picks up the new
    # default for ``inf_as_na`` without passing it explicitly.
    globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
    if inf_as_na:
        globals()["nan_checker"] = lambda x: ~np.isfinite(x)
        globals()["INF_AS_NA"] = True
    else:
        globals()["nan_checker"] = np.isnan
        globals()["INF_AS_NA"] = False
259
+
260
+
261
def _isna_array(values: ArrayLike, inf_as_na: bool = False):
    """
    Return an array indicating which values of the input array are NaN / NA.

    Parameters
    ----------
    values : ndarray or ExtensionArray
        The input array whose elements are to be checked.
    inf_as_na : bool
        Whether or not to treat infinite values as NA.

    Returns
    -------
    array-like
        Array of boolean values denoting the NA status of each element.
    """
    dtype = values.dtype

    if not isinstance(values, np.ndarray):
        # i.e. ExtensionArray
        if inf_as_na and isinstance(dtype, CategoricalDtype):
            return libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)
        return values.isna()

    # np.rec.recarray is an ndarray subclass, so check it before the
    # generic dtype-based dispatch below.
    if isinstance(values, np.rec.recarray):
        # GH 48526
        return _isna_recarray_dtype(values, inf_as_na=inf_as_na)
    if is_string_or_object_np_dtype(values.dtype):
        return _isna_string_dtype(values, inf_as_na=inf_as_na)
    if dtype.kind in "mM":
        # datetime64 / timedelta64: NaT is stored as iNaT in the i8 view
        return values.view("i8") == iNaT

    # Plain numeric dtypes.
    if inf_as_na:
        return ~np.isfinite(values)
    return np.isnan(values)
303
+
304
+
305
def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]:
    """NA mask for string/object-dtype ndarrays (works around NumPy ticket 1542)."""
    if values.dtype.kind in ("S", "U"):
        # Fixed-width string arrays can never hold NA sentinels.
        return np.zeros(values.shape, dtype=bool)

    if values.ndim in {1, 2}:
        return libmissing.isnaobj(values, inf_as_na=inf_as_na)

    # 0-D input, reached via e.g. mask_missing: flatten, check, restore shape.
    flat = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na)
    return flat.reshape(values.shape)
320
+
321
+
322
+ def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_:
323
+ is_inf_in_record = np.zeros(len(record_as_array), dtype=bool)
324
+ for i, value in enumerate(record_as_array):
325
+ is_element_inf = False
326
+ try:
327
+ is_element_inf = np.isinf(value)
328
+ except TypeError:
329
+ is_element_inf = False
330
+ is_inf_in_record[i] = is_element_inf
331
+
332
+ return np.any(is_inf_in_record)
333
+
334
+
335
def _isna_recarray_dtype(
    values: np.rec.recarray, inf_as_na: bool
) -> npt.NDArray[np.bool_]:
    """Per-record NA flags for a recarray (GH 48526)."""
    result = np.zeros(values.shape, dtype=bool)
    for i, record in enumerate(values):
        fields = np.array(record.tolist())
        record_is_na = isna_all(fields)
        record_has_inf = False
        if inf_as_na:
            record_has_inf = bool(_has_record_inf_value(fields))
        # A record counts as NA if every field is NA, or (optionally)
        # if it contains an infinite value.
        result[i] = np.any(np.logical_or(record_is_na, record_has_inf))

    return result
350
+
351
+
352
# Typed overloads for ``notna``: scalars map to a plain bool, array-likes
# to an ndarray of bool, and Series/DataFrame to the same container type.
@overload
def notna(obj: Scalar) -> bool:
    ...


@overload
def notna(
    obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
    ...


@overload
def notna(obj: NDFrameT) -> NDFrameT:
    ...


# handle unions
@overload
def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
    ...


@overload
def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    ...
378
+
379
+
380
def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    """
    Detect non-missing values for an array-like object.

    This function takes a scalar or array-like object and indicates
    whether values are valid (not missing, which is ``NaN`` in numeric
    arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).

    Parameters
    ----------
    obj : array-like or object value
        Object to check for *not* null or *non*-missing values.

    Returns
    -------
    bool or array-like of bool
        For scalar input, returns a scalar boolean.
        For array input, returns an array of boolean indicating whether each
        corresponding element is valid.

    See Also
    --------
    isna : Boolean inverse of pandas.notna.
    Series.notna : Detect valid values in a Series.
    DataFrame.notna : Detect valid values in a DataFrame.
    Index.notna : Detect valid values in an Index.

    Examples
    --------
    Scalar arguments (including strings) result in a scalar boolean.

    >>> pd.notna('dog')
    True

    >>> pd.notna(pd.NA)
    False

    >>> pd.notna(np.nan)
    False

    ndarrays result in an ndarray of booleans.

    >>> pd.notna(np.array([[1, np.nan, 3], [4, 5, np.nan]]))
    array([[ True, False,  True],
           [ True,  True, False]])

    For Series and DataFrame, the same type is returned, containing booleans.

    >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
    >>> pd.notna(df)
          0      1     2
    0  True   True  True
    1  True  False  True
    """
    missing = isna(obj)
    if isinstance(missing, bool):
        # Scalar path: isna produced a plain bool.
        return not missing
    # Array/Series/DataFrame path: elementwise inversion preserves the type.
    return ~missing
461
+
462
+
463
notnull = notna  # Long-standing alias kept for backward compatibility.
464
+
465
+
466
def array_equivalent(
    left,
    right,
    strict_nan: bool = False,
    dtype_equal: bool = False,
) -> bool:
    """
    True if two arrays, left and right, have equal non-NaN elements, and NaNs
    in corresponding locations. False otherwise. It is assumed that left and
    right are NumPy arrays of the same dtype. The behavior of this function
    (particularly with respect to NaNs) is not defined if the dtypes are
    different.

    Parameters
    ----------
    left, right : ndarrays
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    dtype_equal : bool, default False
        Whether `left` and `right` are known to have the same dtype
        according to `is_dtype_equal`. Some methods like `BlockManager.equals`.
        require that the dtypes match. Setting this to ``True`` can improve
        performance, but will give different results for arrays that are
        equal but different dtypes.

    Returns
    -------
    b : bool
        Returns True if the arrays are equivalent.

    Examples
    --------
    >>> array_equivalent(np.array([1, 2, np.nan]), np.array([1, 2, np.nan]))
    True
    >>> array_equivalent(np.array([1, np.nan, 2]), np.array([1, 2, np.nan]))
    False
    """
    left = np.asarray(left)
    right = np.asarray(right)

    # Shapes must match before any elementwise comparison makes sense.
    if left.shape != right.shape:
        return False

    if dtype_equal:
        # Fastpath: the caller guarantees matching dtypes (Block.equals).
        kind = left.dtype.kind
        if kind in "fc":
            return _array_equivalent_float(left, right)
        if kind in "mM":
            return _array_equivalent_datetimelike(left, right)
        if is_string_or_object_np_dtype(left.dtype):
            # TODO: fastpath for pandas' StringDtype
            return _array_equivalent_object(left, right, strict_nan)
        return np.array_equal(left, right)

    # Slow path when we allow comparing different dtypes.
    # Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
    # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]`.
    if left.dtype.kind in "OSU" or right.dtype.kind in "OSU":
        return _array_equivalent_object(left, right, strict_nan)

    if left.dtype.kind in "fc":
        # NaNs can occur in float and complex arrays.
        if not (left.size and right.size):
            return True
        return ((left == right) | (isna(left) & isna(right))).all()
    elif left.dtype.kind in "mM" or right.dtype.kind in "mM":
        # datetime64, timedelta64, Period: compare as raw i8 so NaT == NaT.
        if left.dtype != right.dtype:
            return False
        left = left.view("i8")
        right = right.view("i8")

    # Structured (void) dtypes must match exactly before comparing.
    if (
        left.dtype.type is np.void or right.dtype.type is np.void
    ) and left.dtype != right.dtype:
        return False

    return np.array_equal(left, right)
554
+
555
+
556
+ def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool:
557
+ return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all())
558
+
559
+
560
+ def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray):
561
+ return np.array_equal(left.view("i8"), right.view("i8"))
562
+
563
+
564
def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool):
    # Elementwise equivalence for object-dtype arrays; NA sentinels are
    # compared by kind (NaT only matches NaT, pd.NA only pd.NA, float NaN
    # only float NaN) in the slow loop below.
    left = ensure_object(left)
    right = ensure_object(right)

    mask: npt.NDArray[np.bool_] | None = None
    if strict_nan:
        # Positions where BOTH sides are NA need the sentinel-aware loop;
        # everything else can go through the fast cython comparison.
        mask = isna(left) & isna(right)
        if not mask.any():
            mask = None

    try:
        if mask is None:
            return lib.array_equivalent_object(left, right)
        if not lib.array_equivalent_object(left[~mask], right[~mask]):
            return False
        left_remaining = left[mask]
        right_remaining = right[mask]
    except ValueError:
        # can raise a ValueError if left and right cannot be
        # compared (e.g. nested arrays); fall back to the full slow loop
        left_remaining = left
        right_remaining = right

    for left_value, right_value in zip(left_remaining, right_remaining):
        if left_value is NaT and right_value is not NaT:
            return False

        elif left_value is libmissing.NA and right_value is not libmissing.NA:
            return False

        elif isinstance(left_value, float) and np.isnan(left_value):
            if not isinstance(right_value, float) or not np.isnan(right_value):
                return False
        else:
            with warnings.catch_warnings():
                # suppress numpy's "elementwise comparison failed"
                warnings.simplefilter("ignore", DeprecationWarning)
                try:
                    if np.any(np.asarray(left_value != right_value)):
                        return False
                except TypeError as err:
                    if "boolean value of NA is ambiguous" in str(err):
                        return False
                    raise
                except ValueError:
                    # numpy can raise a ValueError if left and right cannot be
                    # compared (e.g. nested arrays)
                    return False
    return True
613
+
614
+
615
def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
    """
    ExtensionArray-compatible implementation of array_equivalent.
    """
    if left.dtype != right.dtype:
        return False
    if isinstance(left, ABCExtensionArray):
        # Delegate to the array's own NA-aware equality.
        return left.equals(right)
    return array_equivalent(left, right, dtype_equal=True)
625
+
626
+
627
def infer_fill_value(val):
    """
    infer the fill value for the nan/NaT from the provided
    scalar/ndarray/list-like if we are a NaT, return the correct dtyped
    element to provide proper block construction
    """
    if not is_list_like(val):
        val = [val]
    arr = np.asarray(val)

    if arr.dtype.kind in "mM":
        # datetime64 / timedelta64: NaT of the same dtype.
        return np.array("NaT", dtype=arr.dtype)

    if arr.dtype == object:
        # Inspect the object values to see if they are datetimelike.
        inferred = lib.infer_dtype(ensure_object(arr), skipna=False)
        if inferred in ["datetime", "datetime64"]:
            return np.array("NaT", dtype=DT64NS_DTYPE)
        if inferred in ["timedelta", "timedelta64"]:
            return np.array("NaT", dtype=TD64NS_DTYPE)
        return np.array(np.nan, dtype=object)

    if arr.dtype.kind == "U":
        return np.array(np.nan, dtype=arr.dtype)

    return np.nan
648
+
649
+
650
def construct_1d_array_from_inferred_fill_value(
    value: object, length: int
) -> ArrayLike:
    # Find our empty_value dtype by constructing a length-1 array
    # from the value and doing a fill-inducing .take on it.
    from pandas.core.algorithms import take_nd
    from pandas.core.construction import sanitize_array
    from pandas.core.indexes.base import Index

    seed = sanitize_array(value, Index(range(1)), copy=False)
    indexer = np.full(length, -1, dtype=np.intp)
    return take_nd(seed, indexer)
662
+
663
+
664
def maybe_fill(arr: np.ndarray) -> np.ndarray:
    """
    Fill *arr* in place with NaN, unless it has an integer or boolean dtype
    (which cannot hold NaN). Returns the same array either way.
    """
    if arr.dtype.kind in "iub":
        return arr
    arr.fill(np.nan)
    return arr
671
+
672
+
673
def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
    """
    Return a dtype compat na value

    Parameters
    ----------
    dtype : string / dtype
    compat : bool, default True
        If True, return a fill value the dtype can actually hold (0 for
        integer dtypes, False for bool); if False, return np.nan for those.

    Returns
    -------
    Scalar NA value appropriate for ``dtype``: e.g. np.nan, NaT, 0, False,
    or an extension dtype's own ``na_value``.

    Examples
    --------
    >>> na_value_for_dtype(np.dtype('int64'))
    0
    >>> na_value_for_dtype(np.dtype('int64'), compat=False)
    nan
    >>> na_value_for_dtype(np.dtype('float64'))
    nan
    >>> na_value_for_dtype(np.dtype('bool'))
    False
    >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
    numpy.datetime64('NaT')
    """

    if isinstance(dtype, ExtensionDtype):
        return dtype.na_value
    elif dtype.kind in "mM":
        # Preserve the dtype's unit (ns, us, ...) in the NaT returned.
        unit = np.datetime_data(dtype)[0]
        return dtype.type("NaT", unit)
    elif dtype.kind == "f":
        return np.nan
    elif dtype.kind in "iu":
        if compat:
            return 0
        return np.nan
    elif dtype.kind == "b":
        if compat:
            return False
        return np.nan
    return np.nan
716
+
717
+
718
def remove_na_arraylike(arr: Series | Index | np.ndarray):
    """
    Return array-like containing only true/non-NaN values, possibly empty.
    """
    if isinstance(arr.dtype, ExtensionDtype):
        # ExtensionArrays know their own NA semantics.
        return arr[notna(arr)]
    return arr[notna(np.asarray(arr))]
726
+
727
+
728
def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
    """
    isna check that excludes incompatible dtypes

    Parameters
    ----------
    obj : object
    dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype

    Returns
    -------
    bool
    """
    # Anything that is not a scalar NA is immediately rejected.
    if not lib.is_scalar(obj) or not isna(obj):
        return False

    kind = dtype.kind
    if kind == "M":
        if isinstance(dtype, np.dtype):
            # i.e. not tzaware
            return not isinstance(obj, (np.timedelta64, Decimal))
        # we have to rule out tznaive dt64("NaT")
        return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))
    if kind == "m":
        return not isinstance(obj, (np.datetime64, Decimal))
    if kind in "iufc":
        # Numeric
        return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))
    if kind == "b":
        # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype)
        return lib.is_float(obj) or obj is None or obj is libmissing.NA

    if dtype == _dtype_str:
        # numpy string dtypes to avoid float np.nan
        return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float))

    if dtype == _dtype_object:
        # This is needed for Categorical, but is kind of weird
        return True

    if isinstance(dtype, PeriodDtype):
        return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))

    if isinstance(dtype, IntervalDtype):
        return lib.is_float(obj) or obj is None or obj is libmissing.NA

    if isinstance(dtype, CategoricalDtype):
        # Defer to the categories' dtype.
        return is_valid_na_for_dtype(obj, dtype.categories.dtype)

    # fallback, default to allowing NaN, None, NA, NaT
    return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
777
+
778
+
779
def isna_all(arr: ArrayLike) -> bool:
    """
    Optimized equivalent to isna(arr).all()
    """
    total_len = len(arr)

    # Usually it's enough to check but a small fraction of values to see if
    # a block is NOT null, chunks should help in such cases.
    # parameters 1000 and 40 were chosen arbitrarily
    chunk_len = max(total_len // 40, 1000)

    dtype = arr.dtype
    if lib.is_np_dtype(dtype, "f"):
        # ``nan_checker`` is a module global swapped by _use_inf_as_na.
        checker = nan_checker

    elif (lib.is_np_dtype(dtype, "mM")) or isinstance(
        dtype, (DatetimeTZDtype, PeriodDtype)
    ):
        # Datetimelike: NaT is represented as iNaT in the i8 view.
        # error: Incompatible types in assignment (expression has type
        # "Callable[[Any], Any]", variable has type "ufunc")
        checker = lambda x: np.asarray(x.view("i8")) == iNaT  # type: ignore[assignment]

    else:
        # Fallback: run the full _isna_array per chunk, honoring the
        # module-level INF_AS_NA flag.
        # error: Incompatible types in assignment (expression has type "Callable[[Any],
        # Any]", variable has type "ufunc")
        checker = lambda x: _isna_array(  # type: ignore[assignment]
            x, inf_as_na=INF_AS_NA
        )

    # ``all`` short-circuits as soon as a chunk contains a non-NA value.
    return all(
        checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
    )
vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pandas.core.groupby.generic import (
    DataFrameGroupBy,
    NamedAgg,
    SeriesGroupBy,
)
from pandas.core.groupby.groupby import GroupBy
from pandas.core.groupby.grouper import Grouper

# Public re-exports of the groupby machinery.
__all__ = [
    "DataFrameGroupBy",
    "NamedAgg",
    "SeriesGroupBy",
    "GroupBy",
    "Grouper",
]
vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/generic.py ADDED
@@ -0,0 +1,2852 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Define the SeriesGroupBy and DataFrameGroupBy
3
+ classes that hold the groupby interfaces (and some implementations).
4
+
5
+ These are user facing as the result of the ``df.groupby(...)`` operations,
6
+ which here returns a DataFrameGroupBy object.
7
+ """
8
+ from __future__ import annotations
9
+
10
+ from collections import abc
11
+ from functools import partial
12
+ from textwrap import dedent
13
+ from typing import (
14
+ TYPE_CHECKING,
15
+ Any,
16
+ Callable,
17
+ Literal,
18
+ NamedTuple,
19
+ TypeVar,
20
+ Union,
21
+ cast,
22
+ )
23
+ import warnings
24
+
25
+ import numpy as np
26
+
27
+ from pandas._libs import (
28
+ Interval,
29
+ lib,
30
+ )
31
+ from pandas._libs.hashtable import duplicated
32
+ from pandas.errors import SpecificationError
33
+ from pandas.util._decorators import (
34
+ Appender,
35
+ Substitution,
36
+ doc,
37
+ )
38
+ from pandas.util._exceptions import find_stack_level
39
+
40
+ from pandas.core.dtypes.common import (
41
+ ensure_int64,
42
+ is_bool,
43
+ is_dict_like,
44
+ is_integer_dtype,
45
+ is_list_like,
46
+ is_numeric_dtype,
47
+ is_scalar,
48
+ )
49
+ from pandas.core.dtypes.dtypes import (
50
+ CategoricalDtype,
51
+ IntervalDtype,
52
+ )
53
+ from pandas.core.dtypes.inference import is_hashable
54
+ from pandas.core.dtypes.missing import (
55
+ isna,
56
+ notna,
57
+ )
58
+
59
+ from pandas.core import algorithms
60
+ from pandas.core.apply import (
61
+ GroupByApply,
62
+ maybe_mangle_lambdas,
63
+ reconstruct_func,
64
+ validate_func_kwargs,
65
+ warn_alias_replacement,
66
+ )
67
+ import pandas.core.common as com
68
+ from pandas.core.frame import DataFrame
69
+ from pandas.core.groupby import (
70
+ base,
71
+ ops,
72
+ )
73
+ from pandas.core.groupby.groupby import (
74
+ GroupBy,
75
+ GroupByPlot,
76
+ _agg_template_frame,
77
+ _agg_template_series,
78
+ _apply_docs,
79
+ _transform_template,
80
+ )
81
+ from pandas.core.indexes.api import (
82
+ Index,
83
+ MultiIndex,
84
+ all_indexes_same,
85
+ default_index,
86
+ )
87
+ from pandas.core.series import Series
88
+ from pandas.core.sorting import get_group_index
89
+ from pandas.core.util.numba_ import maybe_use_numba
90
+
91
+ from pandas.plotting import boxplot_frame_groupby
92
+
93
+ if TYPE_CHECKING:
94
+ from collections.abc import (
95
+ Hashable,
96
+ Mapping,
97
+ Sequence,
98
+ )
99
+
100
+ from pandas._typing import (
101
+ ArrayLike,
102
+ Axis,
103
+ AxisInt,
104
+ CorrelationMethod,
105
+ FillnaOptions,
106
+ IndexLabel,
107
+ Manager,
108
+ Manager2D,
109
+ SingleManager,
110
+ TakeIndexer,
111
+ )
112
+
113
+ from pandas import Categorical
114
+ from pandas.core.generic import NDFrame
115
+
116
+ # TODO(typing) the return value on this callable should be any *scalar*.
117
+ AggScalar = Union[str, Callable[..., Any]]
118
+ # TODO: validate types on ScalarResult and move to _typing
119
+ # Blocked from using by https://github.com/python/mypy/issues/1484
120
+ # See note at _mangle_lambda_list
121
+ ScalarResult = TypeVar("ScalarResult")
122
+
123
+
124
class NamedAgg(NamedTuple):
    """
    Helper for column specific aggregation with control over output column names.

    Subclass of typing.NamedTuple.

    Parameters
    ----------
    column : Hashable
        Column label in the DataFrame to apply aggfunc.
    aggfunc : function or str
        Function to apply to the provided column. If string, the name of a built-in
        pandas function.

    Examples
    --------
    >>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]})
    >>> agg_a = pd.NamedAgg(column="a", aggfunc="min")
    >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x))
    >>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1)
         result_a  result_1
    key
    1          -1      10.5
    2           1      12.0
    """

    # Label of the column the aggregation applies to.
    column: Hashable
    # Aggregation to apply: callable, or name of a built-in pandas function.
    aggfunc: AggScalar
152
+
153
+
154
+ class SeriesGroupBy(GroupBy[Series]):
155
+ def _wrap_agged_manager(self, mgr: Manager) -> Series:
156
+ out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
157
+ out._name = self.obj.name
158
+ return out
159
+
160
+ def _get_data_to_aggregate(
161
+ self, *, numeric_only: bool = False, name: str | None = None
162
+ ) -> SingleManager:
163
+ ser = self._obj_with_exclusions
164
+ single = ser._mgr
165
+ if numeric_only and not is_numeric_dtype(ser.dtype):
166
+ # GH#41291 match Series behavior
167
+ kwd_name = "numeric_only"
168
+ raise TypeError(
169
+ f"Cannot use {kwd_name}=True with "
170
+ f"{type(self).__name__}.{name} and non-numeric dtypes."
171
+ )
172
+ return single
173
+
174
    # Shared "Examples" text interpolated into ``aggregate``'s docstring via
    # the ``@doc(_agg_template_series, examples=_agg_examples_doc, ...)``
    # decorator on that method.
    _agg_examples_doc = dedent(
        """
    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])

    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.groupby([1, 1, 2, 2]).min()
    1    1
    2    3
    dtype: int64

    >>> s.groupby([1, 1, 2, 2]).agg('min')
    1    1
    2    3
    dtype: int64

    >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
       min  max
    1    1    2
    2    3    4

    The output column names can be controlled by passing
    the desired column names and aggregations as keyword arguments.

    >>> s.groupby([1, 1, 2, 2]).agg(
    ...     minimum='min',
    ...     maximum='max',
    ... )
       minimum  maximum
    1        1        2
    2        3        4

    .. versionchanged:: 1.3.0

        The resulting dtype will reflect the return value of the aggregating function.

    >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
    1    1.0
    2    3.0
    dtype: float64
    """
    )
223
+
224
    @Appender(
        _apply_docs["template"].format(
            input="series", examples=_apply_docs["series_examples"]
        )
    )
    def apply(self, func, *args, **kwargs) -> Series:
        # Thin override delegating to GroupBy.apply; exists so the shared
        # docstring template can be rendered with Series-specific examples.
        return super().apply(func, *args, **kwargs)
231
+
232
    @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series")
    def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
        # func=None means named aggregation via keyword arguments
        # (e.g. .agg(minimum="min")): extract the output labels and the
        # underlying funcs now, relabel the result at the end.
        relabeling = func is None
        columns = None
        if relabeling:
            columns, func = validate_func_kwargs(kwargs)
            kwargs = {}

        if isinstance(func, str):
            # String alias: dispatch to the named method on this object.
            if maybe_use_numba(engine) and engine is not None:
                # Not all agg functions support numba, only propagate numba kwargs
                # if user asks for numba, and engine is not None
                # (if engine is None, the called function will handle the case where
                # numba is requested via the global option)
                kwargs["engine"] = engine
                if engine_kwargs is not None:
                    kwargs["engine_kwargs"] = engine_kwargs
            return getattr(self, func)(*args, **kwargs)

        elif isinstance(func, abc.Iterable):
            # Catch instances of lists / tuples
            # but not the class list / tuple itself.
            func = maybe_mangle_lambdas(func)
            kwargs["engine"] = engine
            kwargs["engine_kwargs"] = engine_kwargs
            ret = self._aggregate_multiple_funcs(func, *args, **kwargs)
            if relabeling:
                # columns is not narrowed by mypy from relabeling flag
                assert columns is not None  # for mypy
                ret.columns = columns
            if not self.as_index:
                ret = ret.reset_index()
            return ret

        else:
            cyfunc = com.get_cython_func(func)
            if cyfunc and not args and not kwargs:
                # A callable with a cython-implemented equivalent (e.g.
                # np.sum) is dispatched to the named method instead; the
                # alias substitution itself is deprecated.
                warn_alias_replacement(self, func, cyfunc)
                return getattr(self, cyfunc)()

            if maybe_use_numba(engine):
                return self._aggregate_with_numba(
                    func, *args, engine_kwargs=engine_kwargs, **kwargs
                )

            if self.ngroups == 0:
                # e.g. test_evaluate_with_empty_groups without any groups to
                # iterate over, we have no output on which to do dtype
                # inference. We default to using the existing dtype.
                # xref GH#51445
                obj = self._obj_with_exclusions
                return self.obj._constructor(
                    [],
                    name=self.obj.name,
                    index=self._grouper.result_index,
                    dtype=obj.dtype,
                )

            if self._grouper.nkeys > 1:
                return self._python_agg_general(func, *args, **kwargs)

            try:
                return self._python_agg_general(func, *args, **kwargs)
            except KeyError:
                # KeyError raised in test_groupby.test_basic is bc the func does
                # a dictionary lookup on group.name, but group name is not
                # pinned in _python_agg_general, only in _aggregate_named
                result = self._aggregate_named(func, *args, **kwargs)

                warnings.warn(
                    "Pinning the groupby key to each group in "
                    f"{type(self).__name__}.agg is deprecated, and cases that "
                    "relied on it will raise in a future version. "
                    "If your operation requires utilizing the groupby keys, "
                    "iterate over the groupby object instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

                # result is a dict whose keys are the elements of result_index
                result = Series(result, index=self._grouper.result_index)
                result = self._wrap_aggregated_output(result)
                return result

    agg = aggregate
317
+
318
    def _python_agg_general(self, func, *args, **kwargs):
        # Aggregate group-wise with a Python-level callable via the
        # grouper's agg_series machinery.
        orig_func = func
        func = com.is_builtin_func(func)
        if orig_func != func:
            # A builtin (e.g. ``sum``) was swapped for its pandas
            # equivalent; warn that this alias substitution is deprecated.
            alias = com._builtin_table_alias[func]
            warn_alias_replacement(self, orig_func, alias)
        f = lambda x: func(x, *args, **kwargs)

        obj = self._obj_with_exclusions
        result = self._grouper.agg_series(obj, f)
        res = obj._constructor(result, name=obj.name)
        return self._wrap_aggregated_output(res)
330
+
331
    def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
        # Apply each of several aggregations and assemble the per-function
        # results as columns of a DataFrame.
        if isinstance(arg, dict):
            if self.as_index:
                # GH 15931
                raise SpecificationError("nested renamer is not supported")
            else:
                # GH#50684 - This accidentally worked in 1.x
                msg = (
                    "Passing a dictionary to SeriesGroupBy.agg is deprecated "
                    "and will raise in a future version of pandas. Pass a list "
                    "of aggregations instead."
                )
                warnings.warn(
                    message=msg,
                    category=FutureWarning,
                    stacklevel=find_stack_level(),
                )
                arg = list(arg.items())
        elif any(isinstance(x, (tuple, list)) for x in arg):
            # Normalize a mixed list of names and (label, func) pairs so
            # every entry is a (label, func) pair.
            arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
        else:
            # list of functions / function names
            columns = (com.get_callable_name(f) or f for f in arg)
            arg = zip(columns, arg)

        results: dict[base.OutputKey, DataFrame | Series] = {}
        with com.temp_setattr(self, "as_index", True):
            # Combine results using the index, need to adjust index after
            # if as_index=False (GH#50724)
            for idx, (name, func) in enumerate(arg):
                key = base.OutputKey(label=name, position=idx)
                results[key] = self.aggregate(func, *args, **kwargs)

        if any(isinstance(x, DataFrame) for x in results.values()):
            from pandas import concat

            res_df = concat(
                results.values(), axis=1, keys=[key.label for key in results]
            )
            return res_df

        indexed_output = {key.position: val for key, val in results.items()}
        output = self.obj._constructor_expanddim(indexed_output, index=None)
        output.columns = Index(key.label for key in results)

        return output
377
+
378
    def _wrap_applied_output(
        self,
        data: Series,
        values: list[Any],
        not_indexed_same: bool = False,
        is_transform: bool = False,
    ) -> DataFrame | Series:
        """
        Wrap the output of SeriesGroupBy.apply into the expected result.

        Parameters
        ----------
        data : Series
            Input data for groupby operation.
        values : List[Any]
            Applied output for each group.
        not_indexed_same : bool, default False
            Whether the applied outputs are not indexed the same as the group axes.
        is_transform : bool, default False
            Whether the applied function is a transform; affects the index
            used for an empty result.

        Returns
        -------
        DataFrame or Series
        """
        if len(values) == 0:
            # GH #6265
            if is_transform:
                # GH#47787 see test_group_on_empty_multiindex
                res_index = data.index
            else:
                res_index = self._grouper.result_index

            return self.obj._constructor(
                [],
                name=self.obj.name,
                index=res_index,
                dtype=data.dtype,
            )
        assert values is not None

        if isinstance(values[0], dict):
            # GH #823 #24880
            # dict outputs become a DataFrame, then are stacked back to a
            # Series with a MultiIndex of (group key, dict key).
            index = self._grouper.result_index
            res_df = self.obj._constructor_expanddim(values, index=index)
            res_df = self._reindex_output(res_df)
            # if self.observed is False,
            # keep all-NaN rows created while re-indexing
            res_ser = res_df.stack(future_stack=True)
            res_ser.name = self.obj.name
            return res_ser
        elif isinstance(values[0], (Series, DataFrame)):
            result = self._concat_objects(
                values,
                not_indexed_same=not_indexed_same,
                is_transform=is_transform,
            )
            if isinstance(result, Series):
                result.name = self.obj.name
            if not self.as_index and not_indexed_same:
                result = self._insert_inaxis_grouper(result)
                result.index = default_index(len(result))
            return result
        else:
            # GH #6265 #24880
            # scalar outputs: one value per group, indexed by group key.
            result = self.obj._constructor(
                data=values, index=self._grouper.result_index, name=self.obj.name
            )
            if not self.as_index:
                result = self._insert_inaxis_grouper(result)
                result.index = default_index(len(result))
            return self._reindex_output(result)
448
+
449
    def _aggregate_named(self, func, *args, **kwargs):
        """
        Aggregate group-wise with ``group.name`` pinned on each group.

        Returns a dict mapping group name to the extracted scalar result
        of ``func``.
        """
        # Note: this is very similar to _aggregate_series_pure_python,
        # but that does not pin group.name
        result = {}
        initialized = False

        for name, group in self._grouper.get_iterator(
            self._obj_with_exclusions, axis=self.axis
        ):
            # needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations
            object.__setattr__(group, "name", name)

            output = func(group, *args, **kwargs)
            output = ops.extract_result(output)
            if not initialized:
                # We only do this validation on the first iteration
                ops.check_result_array(output, group.dtype)
                initialized = True
            result[name] = output

        return result
470
+
471
    # Examples block substituted into the ``transform`` docstring via the
    # @Substitution decorator on that method.
    __examples_series_doc = dedent(
        """
    >>> ser = pd.Series([390.0, 350.0, 30.0, 20.0],
    ...                 index=["Falcon", "Falcon", "Parrot", "Parrot"],
    ...                 name="Max Speed")
    >>> grouped = ser.groupby([1, 1, 2, 2])
    >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
        Falcon    0.707107
        Falcon   -0.707107
        Parrot    0.707107
        Parrot   -0.707107
        Name: Max Speed, dtype: float64

    Broadcast result of the transformation

    >>> grouped.transform(lambda x: x.max() - x.min())
    Falcon    40.0
    Falcon    40.0
    Parrot    10.0
    Parrot    10.0
    Name: Max Speed, dtype: float64

    >>> grouped.transform("mean")
    Falcon    370.0
    Falcon    370.0
    Parrot     25.0
    Parrot     25.0
    Name: Max Speed, dtype: float64

    .. versionchanged:: 1.3.0

        The resulting dtype will reflect the return value of the passed ``func``,
        for example:

    >>> grouped.transform(lambda x: x.astype(int).max())
    Falcon    390
    Falcon    390
    Parrot     30
    Parrot     30
    Name: Max Speed, dtype: int64
    """
    )
513
+
514
    @Substitution(klass="Series", example=__examples_series_doc)
    @Appender(_transform_template)
    def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
        # Docstring is supplied via the decorators; the shared
        # implementation lives in GroupBy._transform.
        return self._transform(
            func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
        )
520
+
521
    def _cython_transform(
        self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs
    ):
        # Dispatch a named transform (e.g. "cumsum") to the cython engine,
        # translating NotImplementedError into the user-facing TypeError.
        assert axis == 0  # handled by caller

        obj = self._obj_with_exclusions

        try:
            result = self._grouper._cython_operation(
                "transform", obj._values, how, axis, **kwargs
            )
        except NotImplementedError as err:
            # e.g. test_groupby_raises_string
            raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err

        return obj._constructor(result, index=self.obj.index, name=obj.name)
537
+
538
    def _transform_general(
        self, func: Callable, engine, engine_kwargs, *args, **kwargs
    ) -> Series:
        """
        Transform with a callable `func`.

        Applies ``func`` to each group, re-wraps each result with the
        group's index, and concatenates back into the original row order.
        """
        if maybe_use_numba(engine):
            return self._transform_with_numba(
                func, *args, engine_kwargs=engine_kwargs, **kwargs
            )
        assert callable(func)
        klass = type(self.obj)

        results = []
        for name, group in self._grouper.get_iterator(
            self._obj_with_exclusions, axis=self.axis
        ):
            # this setattr is needed for test_transform_lambda_with_datetimetz
            object.__setattr__(group, "name", name)
            res = func(group, *args, **kwargs)

            results.append(klass(res, index=group.index))

        # check for empty "results" to avoid concat ValueError
        if results:
            from pandas.core.reshape.concat import concat

            concatenated = concat(results)
            result = self._set_result_index_ordered(concatenated)
        else:
            # no groups at all: empty float64 Series, matching historical
            # behavior for empty transforms.
            result = self.obj._constructor(dtype=np.float64)

        result.name = self.obj.name
        return result
572
+
573
    def filter(self, func, dropna: bool = True, *args, **kwargs):
        """
        Filter elements from groups that don't satisfy a criterion.

        Elements from groups are filtered if they do not satisfy the
        boolean criterion specified by func.

        Parameters
        ----------
        func : function
            Criterion to apply to each group. Should return True or False.
        dropna : bool
            Drop groups that do not pass the filter. True by default; if False,
            groups that evaluate False are filled with NaNs.

        Returns
        -------
        Series

        Notes
        -----
        Functions that mutate the passed object can produce unexpected
        behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
        for more details.

        Examples
        --------
        >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
        ...                           'foo', 'bar'],
        ...                    'B' : [1, 2, 3, 4, 5, 6],
        ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
        >>> grouped = df.groupby('A')
        >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
        1    2
        3    4
        5    6
        Name: B, dtype: int64
        """
        # A string func names a method to call on each group; otherwise
        # wrap the callable with the extra args.
        if isinstance(func, str):
            wrapper = lambda x: getattr(x, func)(*args, **kwargs)
        else:
            wrapper = lambda x: func(x, *args, **kwargs)

        # Interpret np.nan as False.
        def true_and_notna(x) -> bool:
            b = wrapper(x)
            return notna(b) and b

        try:
            indices = [
                self._get_index(name)
                for name, group in self._grouper.get_iterator(
                    self._obj_with_exclusions, axis=self.axis
                )
                if true_and_notna(group)
            ]
        except (ValueError, TypeError) as err:
            raise TypeError("the filter must return a boolean result") from err

        filtered = self._apply_filter(indices, dropna)
        return filtered
634
+
635
    def nunique(self, dropna: bool = True) -> Series | DataFrame:
        """
        Return number of unique elements in the group.

        Returns
        -------
        Series
            Number of unique values within each group.

        Examples
        --------
        For SeriesGroupby:

        >>> lst = ['a', 'a', 'b', 'b']
        >>> ser = pd.Series([1, 2, 3, 3], index=lst)
        >>> ser
        a    1
        a    2
        b    3
        b    3
        dtype: int64
        >>> ser.groupby(level=0).nunique()
        a    2
        b    1
        dtype: int64

        For Resampler:

        >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    3
        dtype: int64
        >>> ser.resample('MS').nunique()
        2023-01-01    2
        2023-02-01    1
        Freq: MS, dtype: int64
        """
        ids, _, ngroups = self._grouper.group_info
        val = self.obj._values
        codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)

        if self._grouper.has_dropped_na:
            # drop rows belonging to dropped-NA group keys (id == -1)
            mask = ids >= 0
            ids = ids[mask]
            codes = codes[mask]

        # Fold (group id, value code) pairs into a single composite code so
        # duplicates within a group can be detected in one pass.
        group_index = get_group_index(
            labels=[ids, codes],
            shape=(ngroups, len(uniques)),
            sort=False,
            xnull=dropna,
        )

        if dropna:
            mask = group_index >= 0
            if (~mask).any():
                ids = ids[mask]
                group_index = group_index[mask]

        # Count only the first occurrence of each (group, value) pair.
        mask = duplicated(group_index, "first")
        res = np.bincount(ids[~mask], minlength=ngroups)
        res = ensure_int64(res)

        ri = self._grouper.result_index
        result: Series | DataFrame = self.obj._constructor(
            res, index=ri, name=self.obj.name
        )
        if not self.as_index:
            result = self._insert_inaxis_grouper(result)
            result.index = default_index(len(result))
        return self._reindex_output(result, fill_value=0)
710
+
711
+ @doc(Series.describe)
712
+ def describe(self, percentiles=None, include=None, exclude=None) -> Series:
713
+ return super().describe(
714
+ percentiles=percentiles, include=include, exclude=exclude
715
+ )
716
+
717
    def value_counts(
        self,
        normalize: bool = False,
        sort: bool = True,
        ascending: bool = False,
        bins=None,
        dropna: bool = True,
    ) -> Series | DataFrame:
        """
        Return counts (or, with ``normalize=True``, proportions) of unique
        values within each group.

        With ``bins=None`` this delegates to the shared GroupBy
        ``_value_counts`` implementation.  Otherwise values are binned and
        counts are computed per (group, bin), with every bin represented at
        every index level (null-filled with zeros).
        """
        name = "proportion" if normalize else "count"

        if bins is None:
            result = self._value_counts(
                normalize=normalize, sort=sort, ascending=ascending, dropna=dropna
            )
            result.name = name
            return result

        from pandas.core.reshape.merge import get_join_indexers
        from pandas.core.reshape.tile import cut

        ids, _, _ = self._grouper.group_info
        val = self.obj._values

        index_names = self._grouper.names + [self.obj.name]

        if isinstance(val.dtype, CategoricalDtype) or (
            bins is not None and not np.iterable(bins)
        ):
            # scalar bins cannot be done at top level
            # in a backward compatible way
            # GH38672 relates to categorical dtype
            ser = self.apply(
                Series.value_counts,
                normalize=normalize,
                sort=sort,
                ascending=ascending,
                bins=bins,
            )
            ser.name = name
            ser.index.names = index_names
            return ser

        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]

        lab: Index | np.ndarray
        if bins is None:
            lab, lev = algorithms.factorize(val, sort=True)
            llab = lambda lab, inc: lab[inc]
        else:
            # lab is a Categorical with categories an IntervalIndex
            cat_ser = cut(Series(val, copy=False), bins, include_lowest=True)
            cat_obj = cast("Categorical", cat_ser._values)
            lev = cat_obj.categories
            lab = lev.take(
                cat_obj.codes,
                allow_fill=True,
                fill_value=lev._na_value,
            )
            llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]

        if isinstance(lab.dtype, IntervalDtype):
            # TODO: should we do this inside II?
            lab_interval = cast(Interval, lab)

            sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))
        else:
            sorter = np.lexsort((lab, ids))

        ids, lab = ids[sorter], lab[sorter]

        # group boundaries are where group ids change
        idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
        idx = np.r_[0, idchanges]
        if not len(ids):
            idx = idchanges

        # new values are where sorted labels change
        lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
        inc = np.r_[True, lchanges]
        if not len(val):
            inc = lchanges
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts

        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))

        # multi-index components
        codes = self._grouper.reconstructed_codes
        codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
        levels = [ping._group_index for ping in self._grouper.groupings] + [lev]

        if dropna:
            mask = codes[-1] != -1
            if mask.all():
                # nothing to drop; skip masking in the normalize step below
                dropna = False
            else:
                out, codes = out[mask], [level_codes[mask] for level_codes in codes]

        if normalize:
            out = out.astype("float")
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                # exclude NA values from each group's denominator
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc

        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, codes[-1] = out[sorter], codes[-1][sorter]

        if bins is not None:
            # for compat. with libgroupby.value_counts need to ensure every
            # bin is present at every index level, null filled with zeros
            diff = np.zeros(len(out), dtype="bool")
            for level_codes in codes[:-1]:
                diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]

            ncat, nbin = diff.sum(), len(levels[-1])

            left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]

            right = [diff.cumsum() - 1, codes[-1]]

            # error: Argument 1 to "get_join_indexers" has incompatible type
            # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray,
            # ndarray[Any, Any]], Index, Series]]
            _, idx = get_join_indexers(
                left, right, sort=False, how="left"  # type: ignore[arg-type]
            )
            if idx is not None:
                out = np.where(idx != -1, out[idx], 0)

            if sort:
                sorter = np.lexsort((out if ascending else -out, left[0]))
                out, left[-1] = out[sorter], left[-1][sorter]

            # build the multi-index w/ full levels
            def build_codes(lev_codes: np.ndarray) -> np.ndarray:
                return np.repeat(lev_codes[diff], nbin)

            codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
            codes.append(left[-1])

        mi = MultiIndex(
            levels=levels, codes=codes, names=index_names, verify_integrity=False
        )

        if is_integer_dtype(out.dtype):
            out = ensure_int64(out)
        result = self.obj._constructor(out, index=mi, name=name)
        if not self.as_index:
            result = result.reset_index()
        return result
877
+
878
    def fillna(
        self,
        value: object | ArrayLike | None = None,
        method: FillnaOptions | None = None,
        axis: Axis | None | lib.NoDefault = lib.no_default,
        inplace: bool = False,
        limit: int | None = None,
        downcast: dict | None | lib.NoDefault = lib.no_default,
    ) -> Series | None:
        """
        Fill NA/NaN values using the specified method within groups.

        .. deprecated:: 2.2.0
            This method is deprecated and will be removed in a future version.
            Use the :meth:`.SeriesGroupBy.ffill` or :meth:`.SeriesGroupBy.bfill`
            for forward or backward filling instead. If you want to fill with a
            single value, use :meth:`Series.fillna` instead.

        Parameters
        ----------
        value : scalar, dict, Series, or DataFrame
            Value to use to fill holes (e.g. 0), alternately a
            dict/Series/DataFrame of values specifying which value to use for
            each index (for a Series) or column (for a DataFrame).  Values not
            in the dict/Series/DataFrame will not be filled. This value cannot
            be a list. Users wanting to use the ``value`` argument and not ``method``
            should prefer :meth:`.Series.fillna` as this
            will produce the same result and be more performant.
        method : {{'bfill', 'ffill', None}}, default None
            Method to use for filling holes. ``'ffill'`` will propagate
            the last valid observation forward within a group.
            ``'bfill'`` will use next valid observation to fill the gap.
        axis : {0 or 'index', 1 or 'columns'}
            Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`.
        inplace : bool, default False
            Broken. Do not set to True.
        limit : int, default None
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill within a group. In other words,
            if there is a gap with more than this number of consecutive NaNs,
            it will only be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled. Must be greater than 0 if not None.
        downcast : dict, default is None
            A dict of item->dtype of what to downcast if possible,
            or the string 'infer' which will try to downcast to an appropriate
            equal type (e.g. float64 to int64 if possible).

        Returns
        -------
        Series
            Object with missing values filled within groups.

        See Also
        --------
        ffill : Forward fill values within a group.
        bfill : Backward fill values within a group.

        Examples
        --------
        For SeriesGroupBy:

        >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse']
        >>> ser = pd.Series([1, None, None, 2, None], index=lst)
        >>> ser
        cat    1.0
        cat    NaN
        cat    NaN
        mouse  2.0
        mouse  NaN
        dtype: float64
        >>> ser.groupby(level=0).fillna(0, limit=1)
        cat    1.0
        cat    0.0
        cat    NaN
        mouse  2.0
        mouse  0.0
        dtype: float64
        """
        # Deprecated: emit the warning, then delegate to Series.fillna
        # applied group-wise.
        warnings.warn(
            f"{type(self).__name__}.fillna is deprecated and "
            "will be removed in a future version. Use obj.ffill() or obj.bfill() "
            "for forward or backward filling instead. If you want to fill with a "
            f"single value, use {type(self.obj).__name__}.fillna instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        result = self._op_via_apply(
            "fillna",
            value=value,
            method=method,
            axis=axis,
            inplace=inplace,
            limit=limit,
            downcast=downcast,
        )
        return result
975
+
976
    def take(
        self,
        indices: TakeIndexer,
        axis: Axis | lib.NoDefault = lib.no_default,
        **kwargs,
    ) -> Series:
        """
        Return the elements in the given *positional* indices in each group.

        This means that we are not indexing according to actual values in
        the index attribute of the object. We are indexing according to the
        actual position of the element in the object.

        If a requested index does not exist for some group, this method will raise.
        To get similar behavior that ignores indices that don't exist, see
        :meth:`.SeriesGroupBy.nth`.

        Parameters
        ----------
        indices : array-like
            An array of ints indicating which positions to take in each group.
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            The axis on which to select elements. ``0`` means that we are
            selecting rows, ``1`` means that we are selecting columns.
            For `SeriesGroupBy` this parameter is unused and defaults to 0.

            .. deprecated:: 2.1.0
                For axis=1, operate on the underlying object instead. Otherwise
                the axis keyword is not necessary.

        **kwargs
            For compatibility with :meth:`numpy.take`. Has no effect on the
            output.

        Returns
        -------
        Series
            A Series containing the elements taken from each group.

        See Also
        --------
        Series.take : Take elements from a Series along an axis.
        Series.loc : Select a subset of a DataFrame by labels.
        Series.iloc : Select a subset of a DataFrame by positions.
        numpy.take : Take elements from an array along an axis.
        SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist.

        Examples
        --------
        >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
        ...                    ('parrot', 'bird', 24.0),
        ...                    ('lion', 'mammal', 80.5),
        ...                    ('monkey', 'mammal', np.nan),
        ...                    ('rabbit', 'mammal', 15.0)],
        ...                   columns=['name', 'class', 'max_speed'],
        ...                   index=[4, 3, 2, 1, 0])
        >>> df
             name   class  max_speed
        4  falcon    bird      389.0
        3  parrot    bird       24.0
        2    lion  mammal       80.5
        1  monkey  mammal        NaN
        0  rabbit  mammal       15.0
        >>> gb = df["name"].groupby([1, 1, 2, 2, 2])

        Take elements at positions 0 and 1 along the axis 0 in each group (default).

        >>> gb.take([0, 1])
        1  4    falcon
           3    parrot
        2  2      lion
           1    monkey
        Name: name, dtype: object

        We may take elements using negative integers for positive indices,
        starting from the end of the object, just like with Python lists.

        >>> gb.take([-1, -2])
        1  3    parrot
           4    falcon
        2  0    rabbit
           1    monkey
        Name: name, dtype: object
        """
        # Delegate to Series.take applied group-wise.
        result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
        return result
1062
+
1063
    def skew(
        self,
        axis: Axis | lib.NoDefault = lib.no_default,
        skipna: bool = True,
        numeric_only: bool = False,
        **kwargs,
    ) -> Series:
        """
        Return unbiased skew within groups.

        Normalized by N-1.

        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            Axis for the function to be applied on.
            This parameter is only for compatibility with DataFrame and is unused.

            .. deprecated:: 2.1.0
                For axis=1, operate on the underlying object instead. Otherwise
                the axis keyword is not necessary.

        skipna : bool, default True
            Exclude NA/null values when computing the result.

        numeric_only : bool, default False
            Include only float, int, boolean columns. Not implemented for Series.

        **kwargs
            Additional keyword arguments to be passed to the function.

        Returns
        -------
        Series

        See Also
        --------
        Series.skew : Return unbiased skew over requested axis.

        Examples
        --------
        >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.],
        ...                 index=['Falcon', 'Falcon', 'Falcon', 'Falcon',
        ...                        'Parrot', 'Parrot', 'Parrot'],
        ...                 name="Max Speed")
        >>> ser
        Falcon    390.0
        Falcon    350.0
        Falcon    357.0
        Falcon      NaN
        Parrot     22.0
        Parrot     20.0
        Parrot     30.0
        Name: Max Speed, dtype: float64
        >>> ser.groupby(level=0).skew()
        Falcon    1.525174
        Parrot    1.457863
        Name: Max Speed, dtype: float64
        >>> ser.groupby(level=0).skew(skipna=False)
        Falcon         NaN
        Parrot    1.457863
        Name: Max Speed, dtype: float64
        """
        if axis is lib.no_default:
            axis = 0

        if axis != 0:
            # Non-default axis falls back to applying Series.skew group-wise.
            result = self._op_via_apply(
                "skew",
                axis=axis,
                skipna=skipna,
                numeric_only=numeric_only,
                **kwargs,
            )
            return result

        def alt(obj):
            # This should not be reached since the cython path should raise
            # TypeError and not NotImplementedError.
            raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")

        return self._cython_agg_general(
            "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
        )
1147
+
1148
+ @property
1149
+ @doc(Series.plot.__doc__)
1150
+ def plot(self) -> GroupByPlot:
1151
+ result = GroupByPlot(self)
1152
+ return result
1153
+
1154
+ @doc(Series.nlargest.__doc__)
1155
+ def nlargest(
1156
+ self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
1157
+ ) -> Series:
1158
+ f = partial(Series.nlargest, n=n, keep=keep)
1159
+ data = self._obj_with_exclusions
1160
+ # Don't change behavior if result index happens to be the same, i.e.
1161
+ # already ordered and n >= all group sizes.
1162
+ result = self._python_apply_general(f, data, not_indexed_same=True)
1163
+ return result
1164
+
1165
+ @doc(Series.nsmallest.__doc__)
1166
+ def nsmallest(
1167
+ self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
1168
+ ) -> Series:
1169
+ f = partial(Series.nsmallest, n=n, keep=keep)
1170
+ data = self._obj_with_exclusions
1171
+ # Don't change behavior if result index happens to be the same, i.e.
1172
+ # already ordered and n >= all group sizes.
1173
+ result = self._python_apply_general(f, data, not_indexed_same=True)
1174
+ return result
1175
+
1176
    @doc(Series.idxmin.__doc__)
    def idxmin(
        self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
    ) -> Series:
        # Shared implementation for idxmin/idxmax lives in GroupBy.
        return self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna)
1181
+
1182
    @doc(Series.idxmax.__doc__)
    def idxmax(
        self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
    ) -> Series:
        # Shared implementation for idxmin/idxmax lives in GroupBy.
        return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna)
1187
+
1188
+ @doc(Series.corr.__doc__)
1189
+ def corr(
1190
+ self,
1191
+ other: Series,
1192
+ method: CorrelationMethod = "pearson",
1193
+ min_periods: int | None = None,
1194
+ ) -> Series:
1195
+ result = self._op_via_apply(
1196
+ "corr", other=other, method=method, min_periods=min_periods
1197
+ )
1198
+ return result
1199
+
1200
+ @doc(Series.cov.__doc__)
1201
+ def cov(
1202
+ self, other: Series, min_periods: int | None = None, ddof: int | None = 1
1203
+ ) -> Series:
1204
+ result = self._op_via_apply(
1205
+ "cov", other=other, min_periods=min_periods, ddof=ddof
1206
+ )
1207
+ return result
1208
+
1209
    @property
    def is_monotonic_increasing(self) -> Series:
        """
        Return whether each group's values are monotonically increasing.

        Returns
        -------
        Series

        Examples
        --------
        >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
        >>> s.groupby(level=0).is_monotonic_increasing
        Falcon    False
        Parrot     True
        dtype: bool
        """
        # Evaluated per group via apply; the result is indexed by group label.
        return self.apply(lambda ser: ser.is_monotonic_increasing)
1227
+
1228
    @property
    def is_monotonic_decreasing(self) -> Series:
        """
        Return whether each group's values are monotonically decreasing.

        Returns
        -------
        Series

        Examples
        --------
        >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
        >>> s.groupby(level=0).is_monotonic_decreasing
        Falcon     True
        Parrot    False
        dtype: bool
        """
        # Evaluated per group via apply; the result is indexed by group label.
        return self.apply(lambda ser: ser.is_monotonic_decreasing)
1246
+
1247
+ @doc(Series.hist.__doc__)
1248
+ def hist(
1249
+ self,
1250
+ by=None,
1251
+ ax=None,
1252
+ grid: bool = True,
1253
+ xlabelsize: int | None = None,
1254
+ xrot: float | None = None,
1255
+ ylabelsize: int | None = None,
1256
+ yrot: float | None = None,
1257
+ figsize: tuple[int, int] | None = None,
1258
+ bins: int | Sequence[int] = 10,
1259
+ backend: str | None = None,
1260
+ legend: bool = False,
1261
+ **kwargs,
1262
+ ):
1263
+ result = self._op_via_apply(
1264
+ "hist",
1265
+ by=by,
1266
+ ax=ax,
1267
+ grid=grid,
1268
+ xlabelsize=xlabelsize,
1269
+ xrot=xrot,
1270
+ ylabelsize=ylabelsize,
1271
+ yrot=yrot,
1272
+ figsize=figsize,
1273
+ bins=bins,
1274
+ backend=backend,
1275
+ legend=legend,
1276
+ **kwargs,
1277
+ )
1278
+ return result
1279
+
1280
+ @property
1281
+ @doc(Series.dtype.__doc__)
1282
+ def dtype(self) -> Series:
1283
+ return self.apply(lambda ser: ser.dtype)
1284
+
1285
+ def unique(self) -> Series:
1286
+ """
1287
+ Return unique values for each group.
1288
+
1289
+ It returns unique values for each of the grouped values. Returned in
1290
+ order of appearance. Hash table-based unique, therefore does NOT sort.
1291
+
1292
+ Returns
1293
+ -------
1294
+ Series
1295
+ Unique values for each of the grouped values.
1296
+
1297
+ See Also
1298
+ --------
1299
+ Series.unique : Return unique values of Series object.
1300
+
1301
+ Examples
1302
+ --------
1303
+ >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1),
1304
+ ... ('Beagle', 'dog', 15.2),
1305
+ ... ('Chihuahua', 'dog', 6.9),
1306
+ ... ('Persian', 'cat', 9.2),
1307
+ ... ('Chihuahua', 'dog', 7),
1308
+ ... ('Persian', 'cat', 8.8)],
1309
+ ... columns=['breed', 'animal', 'height_in'])
1310
+ >>> df
1311
+ breed animal height_in
1312
+ 0 Chihuahua dog 6.1
1313
+ 1 Beagle dog 15.2
1314
+ 2 Chihuahua dog 6.9
1315
+ 3 Persian cat 9.2
1316
+ 4 Chihuahua dog 7.0
1317
+ 5 Persian cat 8.8
1318
+ >>> ser = df.groupby('animal')['breed'].unique()
1319
+ >>> ser
1320
+ animal
1321
+ cat [Persian]
1322
+ dog [Chihuahua, Beagle]
1323
+ Name: breed, dtype: object
1324
+ """
1325
+ result = self._op_via_apply("unique")
1326
+ return result
1327
+
1328
+
1329
+ class DataFrameGroupBy(GroupBy[DataFrame]):
1330
+ _agg_examples_doc = dedent(
1331
+ """
1332
+ Examples
1333
+ --------
1334
+ >>> data = {"A": [1, 1, 2, 2],
1335
+ ... "B": [1, 2, 3, 4],
1336
+ ... "C": [0.362838, 0.227877, 1.267767, -0.562860]}
1337
+ >>> df = pd.DataFrame(data)
1338
+ >>> df
1339
+ A B C
1340
+ 0 1 1 0.362838
1341
+ 1 1 2 0.227877
1342
+ 2 2 3 1.267767
1343
+ 3 2 4 -0.562860
1344
+
1345
+ The aggregation is for each column.
1346
+
1347
+ >>> df.groupby('A').agg('min')
1348
+ B C
1349
+ A
1350
+ 1 1 0.227877
1351
+ 2 3 -0.562860
1352
+
1353
+ Multiple aggregations
1354
+
1355
+ >>> df.groupby('A').agg(['min', 'max'])
1356
+ B C
1357
+ min max min max
1358
+ A
1359
+ 1 1 2 0.227877 0.362838
1360
+ 2 3 4 -0.562860 1.267767
1361
+
1362
+ Select a column for aggregation
1363
+
1364
+ >>> df.groupby('A').B.agg(['min', 'max'])
1365
+ min max
1366
+ A
1367
+ 1 1 2
1368
+ 2 3 4
1369
+
1370
+ User-defined function for aggregation
1371
+
1372
+ >>> df.groupby('A').agg(lambda x: sum(x) + 2)
1373
+ B C
1374
+ A
1375
+ 1 5 2.590715
1376
+ 2 9 2.704907
1377
+
1378
+ Different aggregations per column
1379
+
1380
+ >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
1381
+ B C
1382
+ min max sum
1383
+ A
1384
+ 1 1 2 0.590715
1385
+ 2 3 4 0.704907
1386
+
1387
+ To control the output names with different aggregations per column,
1388
+ pandas supports "named aggregation"
1389
+
1390
+ >>> df.groupby("A").agg(
1391
+ ... b_min=pd.NamedAgg(column="B", aggfunc="min"),
1392
+ ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")
1393
+ ... )
1394
+ b_min c_sum
1395
+ A
1396
+ 1 1 0.590715
1397
+ 2 3 0.704907
1398
+
1399
+ - The keywords are the *output* column names
1400
+ - The values are tuples whose first element is the column to select
1401
+ and the second element is the aggregation to apply to that column.
1402
+ Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
1403
+ ``['column', 'aggfunc']`` to make it clearer what the arguments are.
1404
+ As usual, the aggregation can be a callable or a string alias.
1405
+
1406
+ See :ref:`groupby.aggregate.named` for more.
1407
+
1408
+ .. versionchanged:: 1.3.0
1409
+
1410
+ The resulting dtype will reflect the return value of the aggregating function.
1411
+
1412
+ >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())
1413
+ B
1414
+ A
1415
+ 1 1.0
1416
+ 2 3.0
1417
+ """
1418
+ )
1419
+
1420
    @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame")
    def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
        # Normalize named-aggregation kwargs (e.g. out=("col", "min")) into a
        # plain func plus the output column names/order to restore afterwards.
        relabeling, func, columns, order = reconstruct_func(func, **kwargs)
        func = maybe_mangle_lambdas(func)

        if maybe_use_numba(engine):
            # Not all agg functions support numba, only propagate numba kwargs
            # if user asks for numba
            kwargs["engine"] = engine
            kwargs["engine_kwargs"] = engine_kwargs

        # First attempt: the generic apply-based aggregation machinery.
        # It returns None when func is a plain callable it cannot dispatch.
        op = GroupByApply(self, func, args=args, kwargs=kwargs)
        result = op.agg()
        if not is_dict_like(func) and result is not None:
            # GH #52849
            if not self.as_index and is_list_like(func):
                return result.reset_index()
            else:
                return result
        elif relabeling:
            # this should be the only (non-raising) case with relabeling
            # used reordered index of columns
            result = cast(DataFrame, result)
            result = result.iloc[:, order]
            result = cast(DataFrame, result)
            # error: Incompatible types in assignment (expression has type
            # "Optional[List[str]]", variable has type
            # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]],
            # Index, Series], Sequence[Any]]")
            result.columns = columns  # type: ignore[assignment]

        if result is None:
            # GroupByApply could not handle func; fall back to the
            # grouper-specific aggregation paths below.
            # Remove the kwargs we inserted
            # (already stored in engine, engine_kwargs arguments)
            if "engine" in kwargs:
                del kwargs["engine"]
                del kwargs["engine_kwargs"]
            # at this point func is not a str, list-like, dict-like,
            # or a known callable(e.g. sum)
            if maybe_use_numba(engine):
                return self._aggregate_with_numba(
                    func, *args, engine_kwargs=engine_kwargs, **kwargs
                )
            # grouper specific aggregations
            if self._grouper.nkeys > 1:
                # test_groupby_as_index_series_scalar gets here with 'not self.as_index'
                return self._python_agg_general(func, *args, **kwargs)
            elif args or kwargs:
                # test_pass_args_kwargs gets here (with and without as_index)
                # can't return early
                result = self._aggregate_frame(func, *args, **kwargs)

            elif self.axis == 1:
                # _aggregate_multiple_funcs does not allow self.axis == 1
                # Note: axis == 1 precludes 'not self.as_index', see __init__
                result = self._aggregate_frame(func)
                return result

            else:
                # try to treat as if we are passing a list
                gba = GroupByApply(self, [func], args=(), kwargs={})
                try:
                    result = gba.agg()

                except ValueError as err:
                    if "No objects to concatenate" not in str(err):
                        raise
                    # _aggregate_frame can fail with e.g. func=Series.mode,
                    # where it expects 1D values but would be getting 2D values
                    # In other tests, using aggregate_frame instead of GroupByApply
                    # would give correct values but incorrect dtypes
                    # object vs float64 in test_cython_agg_empty_buckets
                    # float64 vs int64 in test_category_order_apply
                    result = self._aggregate_frame(func)

                else:
                    # GH#32040, GH#35246
                    # e.g. test_groupby_as_index_select_column_sum_empty_df
                    result = cast(DataFrame, result)
                    result.columns = self._obj_with_exclusions.columns.copy()

        if not self.as_index:
            # Group keys become ordinary columns with a default RangeIndex.
            result = self._insert_inaxis_grouper(result)
            result.index = default_index(len(result))

        return result

    # Alias so df.groupby(...).agg(...) works interchangeably.
    agg = aggregate
1508
+
1509
    def _python_agg_general(self, func, *args, **kwargs):
        # Pure-python fallback aggregation: run ``func`` on each column's
        # per-group Series via the grouper, then reassemble a DataFrame.
        orig_func = func
        # Map builtin callables (e.g. ``sum``) to their pandas equivalents,
        # warning about the alias replacement.
        func = com.is_builtin_func(func)
        if orig_func != func:
            alias = com._builtin_table_alias[func]
            warn_alias_replacement(self, orig_func, alias)
        f = lambda x: func(x, *args, **kwargs)

        if self.ngroups == 0:
            # e.g. test_evaluate_with_empty_groups different path gets different
            # result dtype in empty case.
            return self._python_apply_general(f, self._selected_obj, is_agg=True)

        obj = self._obj_with_exclusions
        if self.axis == 1:
            obj = obj.T

        if not len(obj.columns):
            # e.g. test_margins_no_values_no_cols
            return self._python_apply_general(f, self._selected_obj)

        # Aggregate column by column through the grouper's python fallback;
        # keys are positional so duplicate column labels survive.
        output: dict[int, ArrayLike] = {}
        for idx, (name, ser) in enumerate(obj.items()):
            result = self._grouper.agg_series(ser, f)
            output[idx] = result

        res = self.obj._constructor(output)
        res.columns = obj.columns.copy(deep=False)
        return self._wrap_aggregated_output(res)
1538
+
1539
+ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
1540
+ if self._grouper.nkeys != 1:
1541
+ raise AssertionError("Number of keys must be 1")
1542
+
1543
+ obj = self._obj_with_exclusions
1544
+
1545
+ result: dict[Hashable, NDFrame | np.ndarray] = {}
1546
+ for name, grp_df in self._grouper.get_iterator(obj, self.axis):
1547
+ fres = func(grp_df, *args, **kwargs)
1548
+ result[name] = fres
1549
+
1550
+ result_index = self._grouper.result_index
1551
+ other_ax = obj.axes[1 - self.axis]
1552
+ out = self.obj._constructor(result, index=other_ax, columns=result_index)
1553
+ if self.axis == 0:
1554
+ out = out.T
1555
+
1556
+ return out
1557
+
1558
    def _wrap_applied_output(
        self,
        data: DataFrame,
        values: list,
        not_indexed_same: bool = False,
        is_transform: bool = False,
    ):
        # Reassemble per-group results from a python-level apply into a
        # DataFrame/Series; the output shape is chosen from the first
        # non-None result.
        if len(values) == 0:
            if is_transform:
                # GH#47787 see test_group_on_empty_multiindex
                res_index = data.index
            else:
                res_index = self._grouper.result_index

            result = self.obj._constructor(index=res_index, columns=data.columns)
            result = result.astype(data.dtypes, copy=False)
            return result

        # GH12824
        # using values[0] here breaks test_groupby_apply_none_first
        first_not_none = next(com.not_none(*values), None)

        if first_not_none is None:
            # GH9684 - All values are None, return an empty frame.
            return self.obj._constructor()
        elif isinstance(first_not_none, DataFrame):
            return self._concat_objects(
                values,
                not_indexed_same=not_indexed_same,
                is_transform=is_transform,
            )

        key_index = self._grouper.result_index if self.as_index else None

        if isinstance(first_not_none, (np.ndarray, Index)):
            # GH#1738: values is list of arrays of unequal lengths
            # fall through to the outer else clause
            # TODO: sure this is right? we used to do this
            # after raising AttributeError above
            # GH 18930
            if not is_hashable(self._selection):
                # error: Need type annotation for "name"
                name = tuple(self._selection)  # type: ignore[var-annotated, arg-type]
            else:
                # error: Incompatible types in assignment
                # (expression has type "Hashable", variable
                # has type "Tuple[Any, ...]")
                name = self._selection  # type: ignore[assignment]
            return self.obj._constructor_sliced(values, index=key_index, name=name)
        elif not isinstance(first_not_none, Series):
            # values are not series or array-like but scalars
            # self._selection not passed through to Series as the
            # result should not take the name of original selection
            # of columns
            if self.as_index:
                return self.obj._constructor_sliced(values, index=key_index)
            else:
                result = self.obj._constructor(values, columns=[self._selection])
                result = self._insert_inaxis_grouper(result)
                return result
        else:
            # values are Series
            return self._wrap_applied_output_series(
                values,
                not_indexed_same,
                first_not_none,
                key_index,
                is_transform,
            )
1627
+
1628
    def _wrap_applied_output_series(
        self,
        values: list[Series],
        not_indexed_same: bool,
        first_not_none,
        key_index: Index | None,
        is_transform: bool,
    ) -> DataFrame | Series:
        # Replace any None results with an empty Series sharing the axes of
        # the first real result so the vstack below sees uniform shapes.
        kwargs = first_not_none._construct_axes_dict()
        backup = Series(**kwargs)
        values = [x if (x is not None) else backup for x in values]

        all_indexed_same = all_indexes_same(x.index for x in values)

        if not all_indexed_same:
            # GH 8467
            return self._concat_objects(
                values,
                not_indexed_same=True,
                is_transform=is_transform,
            )

        # Combine values
        # vstack+constructor is faster than concat and handles MI-columns
        stacked_values = np.vstack([np.asarray(v) for v in values])

        if self.axis == 0:
            index = key_index
            columns = first_not_none.index.copy()
            if columns.name is None:
                # GH6124 - propagate name of Series when it's consistent
                names = {v.name for v in values}
                if len(names) == 1:
                    columns.name = next(iter(names))
        else:
            index = first_not_none.index
            columns = key_index
            stacked_values = stacked_values.T

        if stacked_values.dtype == object:
            # We'll have the DataFrame constructor do inference
            stacked_values = stacked_values.tolist()
        result = self.obj._constructor(stacked_values, index=index, columns=columns)

        if not self.as_index:
            result = self._insert_inaxis_grouper(result)

        # Reintroduce unobserved categories / missing groups where required.
        return self._reindex_output(result)
1676
+
1677
    def _cython_transform(
        self,
        how: str,
        numeric_only: bool = False,
        axis: AxisInt = 0,
        **kwargs,
    ) -> DataFrame:
        # Dispatch a named transform (e.g. "cumsum", "rank") to the cython
        # kernels, operating block-by-block on the internal manager.
        assert axis == 0  # handled by caller

        # With self.axis == 0, we have multi-block tests
        # e.g. test_rank_min_int, test_cython_transform_frame
        # test_transform_numeric_ret
        # With self.axis == 1, _get_data_to_aggregate does a transpose
        # so we always have a single block.
        mgr: Manager2D = self._get_data_to_aggregate(
            numeric_only=numeric_only, name=how
        )

        def arr_func(bvalues: ArrayLike) -> ArrayLike:
            # Per-block kernel invocation; axis=1 is the block-internal axis.
            return self._grouper._cython_operation(
                "transform", bvalues, how, 1, **kwargs
            )

        # We could use `mgr.apply` here and not have to set_axis, but
        # we would have to do shape gymnastics for ArrayManager compat
        res_mgr = mgr.grouped_reduce(arr_func)
        res_mgr.set_axis(1, mgr.axes[1])

        res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes)
        # Undo the transpose applied for self.axis == 1, if any.
        res_df = self._maybe_transpose_result(res_df)
        return res_df
1708
+
1709
    def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs):
        # Slow, group-by-group transform used when no cython kernel applies.
        if maybe_use_numba(engine):
            return self._transform_with_numba(
                func, *args, engine_kwargs=engine_kwargs, **kwargs
            )
        from pandas.core.reshape.concat import concat

        applied = []
        obj = self._obj_with_exclusions
        gen = self._grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)

        # Determine whether to use slow or fast path by evaluating on the first group.
        # Need to handle the case of an empty generator and process the result so that
        # it does not need to be computed again.
        try:
            name, group = next(gen)
        except StopIteration:
            pass
        else:
            # 2023-02-27 No tests broken by disabling this pinning
            object.__setattr__(group, "name", name)
            try:
                path, res = self._choose_path(fast_path, slow_path, group)
            except ValueError as err:
                # e.g. test_transform_with_non_scalar_group
                msg = "transform must return a scalar value for each group"
                raise ValueError(msg) from err
            if group.size > 0:
                res = _wrap_transform_general_frame(self.obj, group, res)
                applied.append(res)

        # Compute and process with the remaining groups
        for name, group in gen:
            if group.size == 0:
                continue
            # 2023-02-27 No tests broken by disabling this pinning
            object.__setattr__(group, "name", name)
            # ``path`` was fixed by the first group; reuse it for the rest.
            res = path(group)

            res = _wrap_transform_general_frame(self.obj, group, res)
            applied.append(res)

        concat_index = obj.columns if self.axis == 0 else obj.index
        other_axis = 1 if self.axis == 0 else 0  # switches between 0 & 1
        concatenated = concat(applied, axis=self.axis, verify_integrity=False)
        concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
        # Restore the original row order of the ungrouped object.
        return self._set_result_index_ordered(concatenated)
1757
+
1758
+ __examples_dataframe_doc = dedent(
1759
+ """
1760
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
1761
+ ... 'foo', 'bar'],
1762
+ ... 'B' : ['one', 'one', 'two', 'three',
1763
+ ... 'two', 'two'],
1764
+ ... 'C' : [1, 5, 5, 2, 5, 5],
1765
+ ... 'D' : [2.0, 5., 8., 1., 2., 9.]})
1766
+ >>> grouped = df.groupby('A')[['C', 'D']]
1767
+ >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
1768
+ C D
1769
+ 0 -1.154701 -0.577350
1770
+ 1 0.577350 0.000000
1771
+ 2 0.577350 1.154701
1772
+ 3 -1.154701 -1.000000
1773
+ 4 0.577350 -0.577350
1774
+ 5 0.577350 1.000000
1775
+
1776
+ Broadcast result of the transformation
1777
+
1778
+ >>> grouped.transform(lambda x: x.max() - x.min())
1779
+ C D
1780
+ 0 4.0 6.0
1781
+ 1 3.0 8.0
1782
+ 2 4.0 6.0
1783
+ 3 3.0 8.0
1784
+ 4 4.0 6.0
1785
+ 5 3.0 8.0
1786
+
1787
+ >>> grouped.transform("mean")
1788
+ C D
1789
+ 0 3.666667 4.0
1790
+ 1 4.000000 5.0
1791
+ 2 3.666667 4.0
1792
+ 3 4.000000 5.0
1793
+ 4 3.666667 4.0
1794
+ 5 4.000000 5.0
1795
+
1796
+ .. versionchanged:: 1.3.0
1797
+
1798
+ The resulting dtype will reflect the return value of the passed ``func``,
1799
+ for example:
1800
+
1801
+ >>> grouped.transform(lambda x: x.astype(int).max())
1802
+ C D
1803
+ 0 5 8
1804
+ 1 5 9
1805
+ 2 5 8
1806
+ 3 5 9
1807
+ 4 5 8
1808
+ 5 5 9
1809
+ """
1810
+ )
1811
+
1812
    @Substitution(klass="DataFrame", example=__examples_dataframe_doc)
    @Appender(_transform_template)
    def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
        # Thin wrapper: the shared transform machinery lives on the GroupBy base.
        return self._transform(
            func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
        )
1818
+
1819
+ def _define_paths(self, func, *args, **kwargs):
1820
+ if isinstance(func, str):
1821
+ fast_path = lambda group: getattr(group, func)(*args, **kwargs)
1822
+ slow_path = lambda group: group.apply(
1823
+ lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
1824
+ )
1825
+ else:
1826
+ fast_path = lambda group: func(group, *args, **kwargs)
1827
+ slow_path = lambda group: group.apply(
1828
+ lambda x: func(x, *args, **kwargs), axis=self.axis
1829
+ )
1830
+ return fast_path, slow_path
1831
+
1832
    def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
        # Always compute the slow (apply-based) result first; it defines the
        # reference behavior the fast path must reproduce.
        path = slow_path
        res = slow_path(group)

        if self.ngroups == 1:
            # no need to evaluate multiple paths when only
            # a single group exists
            return path, res

        # if we make it here, test if we can use the fast path
        try:
            res_fast = fast_path(group)
        except AssertionError:
            raise  # pragma: no cover
        except Exception:
            # GH#29631 For user-defined function, we can't predict what may be
            # raised; see test_transform.test_transform_fastpath_raises
            return path, res

        # verify fast path returns either:
        # a DataFrame with columns equal to group.columns
        # OR a Series with index equal to group.columns
        if isinstance(res_fast, DataFrame):
            if not res_fast.columns.equals(group.columns):
                return path, res
        elif isinstance(res_fast, Series):
            if not res_fast.index.equals(group.columns):
                return path, res
        else:
            return path, res

        # The fast path is only adopted when it reproduces the slow result
        # exactly on this first group.
        if res_fast.equals(res):
            path = fast_path

        return path, res
1867
+
1868
+ def filter(self, func, dropna: bool = True, *args, **kwargs):
1869
+ """
1870
+ Filter elements from groups that don't satisfy a criterion.
1871
+
1872
+ Elements from groups are filtered if they do not satisfy the
1873
+ boolean criterion specified by func.
1874
+
1875
+ Parameters
1876
+ ----------
1877
+ func : function
1878
+ Criterion to apply to each group. Should return True or False.
1879
+ dropna : bool
1880
+ Drop groups that do not pass the filter. True by default; if False,
1881
+ groups that evaluate False are filled with NaNs.
1882
+
1883
+ Returns
1884
+ -------
1885
+ DataFrame
1886
+
1887
+ Notes
1888
+ -----
1889
+ Each subframe is endowed the attribute 'name' in case you need to know
1890
+ which group you are working on.
1891
+
1892
+ Functions that mutate the passed object can produce unexpected
1893
+ behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
1894
+ for more details.
1895
+
1896
+ Examples
1897
+ --------
1898
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
1899
+ ... 'foo', 'bar'],
1900
+ ... 'B' : [1, 2, 3, 4, 5, 6],
1901
+ ... 'C' : [2.0, 5., 8., 1., 2., 9.]})
1902
+ >>> grouped = df.groupby('A')
1903
+ >>> grouped.filter(lambda x: x['B'].mean() > 3.)
1904
+ A B C
1905
+ 1 bar 2 5.0
1906
+ 3 bar 4 1.0
1907
+ 5 bar 6 9.0
1908
+ """
1909
+ indices = []
1910
+
1911
+ obj = self._selected_obj
1912
+ gen = self._grouper.get_iterator(obj, axis=self.axis)
1913
+
1914
+ for name, group in gen:
1915
+ # 2023-02-27 no tests are broken this pinning, but it is documented in the
1916
+ # docstring above.
1917
+ object.__setattr__(group, "name", name)
1918
+
1919
+ res = func(group, *args, **kwargs)
1920
+
1921
+ try:
1922
+ res = res.squeeze()
1923
+ except AttributeError: # allow e.g., scalars and frames to pass
1924
+ pass
1925
+
1926
+ # interpret the result of the filter
1927
+ if is_bool(res) or (is_scalar(res) and isna(res)):
1928
+ if notna(res) and res:
1929
+ indices.append(self._get_index(name))
1930
+ else:
1931
+ # non scalars aren't allowed
1932
+ raise TypeError(
1933
+ f"filter function returned a {type(res).__name__}, "
1934
+ "but expected a scalar bool"
1935
+ )
1936
+
1937
+ return self._apply_filter(indices, dropna)
1938
+
1939
    def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
        # Column selection on a groupby: gb["col"] / gb[["a", "b"]].
        if self.axis == 1:
            # GH 37725
            raise ValueError("Cannot subset columns when using axis=1")
        # per GH 23566
        if isinstance(key, tuple) and len(key) > 1:
            # if len == 1, then it becomes a SeriesGroupBy and this is actually
            # valid syntax, so don't raise
            raise ValueError(
                "Cannot subset columns with a tuple with more than one element. "
                "Use a list instead."
            )
        return super().__getitem__(key)
1952
+
1953
    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        # ndim == 2 -> DataFrameGroupBy over a column subset;
        # ndim == 1 -> SeriesGroupBy over a single column.
        # Grouping state (grouper, exclusions, flags) is carried over.
        if ndim == 2:
            if subset is None:
                subset = self.obj
            return DataFrameGroupBy(
                subset,
                self.keys,
                axis=self.axis,
                level=self.level,
                grouper=self._grouper,
                exclusions=self.exclusions,
                selection=key,
                as_index=self.as_index,
                sort=self.sort,
                group_keys=self.group_keys,
                observed=self.observed,
                dropna=self.dropna,
            )
        elif ndim == 1:
            if subset is None:
                subset = self.obj[key]
            return SeriesGroupBy(
                subset,
                self.keys,
                level=self.level,
                grouper=self._grouper,
                exclusions=self.exclusions,
                selection=key,
                as_index=self.as_index,
                sort=self.sort,
                group_keys=self.group_keys,
                observed=self.observed,
                dropna=self.dropna,
            )

        raise AssertionError("invalid ndim for _gotitem")
2001
+
2002
+ def _get_data_to_aggregate(
2003
+ self, *, numeric_only: bool = False, name: str | None = None
2004
+ ) -> Manager2D:
2005
+ obj = self._obj_with_exclusions
2006
+ if self.axis == 1:
2007
+ mgr = obj.T._mgr
2008
+ else:
2009
+ mgr = obj._mgr
2010
+
2011
+ if numeric_only:
2012
+ mgr = mgr.get_numeric_data()
2013
+ return mgr
2014
+
2015
    def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
        # Rehydrate a DataFrame from an aggregated block manager without copying.
        return self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
2017
+
2018
    def _apply_to_column_groupbys(self, func) -> DataFrame:
        # Run ``func`` against a SeriesGroupBy for each column, then glue the
        # per-column results back together along axis=1.
        from pandas.core.reshape.concat import concat

        obj = self._obj_with_exclusions
        columns = obj.columns
        sgbs = [
            SeriesGroupBy(
                obj.iloc[:, i],
                selection=colname,
                grouper=self._grouper,
                exclusions=self.exclusions,
                observed=self.observed,
            )
            for i, colname in enumerate(obj.columns)
        ]
        results = [func(sgb) for sgb in sgbs]

        if not len(results):
            # concat would raise
            res_df = DataFrame([], columns=columns, index=self._grouper.result_index)
        else:
            res_df = concat(results, keys=columns, axis=1)

        if not self.as_index:
            res_df.index = default_index(len(res_df))
            res_df = self._insert_inaxis_grouper(res_df)
        return res_df
2045
+
2046
+ def nunique(self, dropna: bool = True) -> DataFrame:
2047
+ """
2048
+ Return DataFrame with counts of unique elements in each position.
2049
+
2050
+ Parameters
2051
+ ----------
2052
+ dropna : bool, default True
2053
+ Don't include NaN in the counts.
2054
+
2055
+ Returns
2056
+ -------
2057
+ nunique: DataFrame
2058
+
2059
+ Examples
2060
+ --------
2061
+ >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
2062
+ ... 'ham', 'ham'],
2063
+ ... 'value1': [1, 5, 5, 2, 5, 5],
2064
+ ... 'value2': list('abbaxy')})
2065
+ >>> df
2066
+ id value1 value2
2067
+ 0 spam 1 a
2068
+ 1 egg 5 b
2069
+ 2 egg 5 b
2070
+ 3 spam 2 a
2071
+ 4 ham 5 x
2072
+ 5 ham 5 y
2073
+
2074
+ >>> df.groupby('id').nunique()
2075
+ value1 value2
2076
+ id
2077
+ egg 1 1
2078
+ ham 1 2
2079
+ spam 2 1
2080
+
2081
+ Check for rows with the same id but conflicting values:
2082
+
2083
+ >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
2084
+ id value1 value2
2085
+ 0 spam 1 a
2086
+ 3 spam 2 a
2087
+ 4 ham 5 x
2088
+ 5 ham 5 y
2089
+ """
2090
+
2091
+ if self.axis != 0:
2092
+ # see test_groupby_crash_on_nunique
2093
+ return self._python_apply_general(
2094
+ lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True
2095
+ )
2096
+
2097
+ return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna))
2098
+
2099
+ def idxmax(
2100
+ self,
2101
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2102
+ skipna: bool = True,
2103
+ numeric_only: bool = False,
2104
+ ) -> DataFrame:
2105
+ """
2106
+ Return index of first occurrence of maximum over requested axis.
2107
+
2108
+ NA/null values are excluded.
2109
+
2110
+ Parameters
2111
+ ----------
2112
+ axis : {{0 or 'index', 1 or 'columns'}}, default None
2113
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
2114
+ If axis is not provided, grouper's axis is used.
2115
+
2116
+ .. versionchanged:: 2.0.0
2117
+
2118
+ .. deprecated:: 2.1.0
2119
+ For axis=1, operate on the underlying object instead. Otherwise
2120
+ the axis keyword is not necessary.
2121
+
2122
+ skipna : bool, default True
2123
+ Exclude NA/null values. If an entire row/column is NA, the result
2124
+ will be NA.
2125
+ numeric_only : bool, default False
2126
+ Include only `float`, `int` or `boolean` data.
2127
+
2128
+ .. versionadded:: 1.5.0
2129
+
2130
+ Returns
2131
+ -------
2132
+ Series
2133
+ Indexes of maxima along the specified axis.
2134
+
2135
+ Raises
2136
+ ------
2137
+ ValueError
2138
+ * If the row/column is empty
2139
+
2140
+ See Also
2141
+ --------
2142
+ Series.idxmax : Return index of the maximum element.
2143
+
2144
+ Notes
2145
+ -----
2146
+ This method is the DataFrame version of ``ndarray.argmax``.
2147
+
2148
+ Examples
2149
+ --------
2150
+ Consider a dataset containing food consumption in Argentina.
2151
+
2152
+ >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
2153
+ ... 'co2_emissions': [37.2, 19.66, 1712]},
2154
+ ... index=['Pork', 'Wheat Products', 'Beef'])
2155
+
2156
+ >>> df
2157
+ consumption co2_emissions
2158
+ Pork 10.51 37.20
2159
+ Wheat Products 103.11 19.66
2160
+ Beef 55.48 1712.00
2161
+
2162
+ By default, it returns the index for the maximum value in each column.
2163
+
2164
+ >>> df.idxmax()
2165
+ consumption Wheat Products
2166
+ co2_emissions Beef
2167
+ dtype: object
2168
+
2169
+ To return the index for the maximum value in each row, use ``axis="columns"``.
2170
+
2171
+ >>> df.idxmax(axis="columns")
2172
+ Pork co2_emissions
2173
+ Wheat Products consumption
2174
+ Beef co2_emissions
2175
+ dtype: object
2176
+ """
2177
+ return self._idxmax_idxmin(
2178
+ "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna
2179
+ )
2180
+
2181
+ def idxmin(
2182
+ self,
2183
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2184
+ skipna: bool = True,
2185
+ numeric_only: bool = False,
2186
+ ) -> DataFrame:
2187
+ """
2188
+ Return index of first occurrence of minimum over requested axis.
2189
+
2190
+ NA/null values are excluded.
2191
+
2192
+ Parameters
2193
+ ----------
2194
+ axis : {{0 or 'index', 1 or 'columns'}}, default None
2195
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
2196
+ If axis is not provided, grouper's axis is used.
2197
+
2198
+ .. versionchanged:: 2.0.0
2199
+
2200
+ .. deprecated:: 2.1.0
2201
+ For axis=1, operate on the underlying object instead. Otherwise
2202
+ the axis keyword is not necessary.
2203
+
2204
+ skipna : bool, default True
2205
+ Exclude NA/null values. If an entire row/column is NA, the result
2206
+ will be NA.
2207
+ numeric_only : bool, default False
2208
+ Include only `float`, `int` or `boolean` data.
2209
+
2210
+ .. versionadded:: 1.5.0
2211
+
2212
+ Returns
2213
+ -------
2214
+ Series
2215
+ Indexes of minima along the specified axis.
2216
+
2217
+ Raises
2218
+ ------
2219
+ ValueError
2220
+ * If the row/column is empty
2221
+
2222
+ See Also
2223
+ --------
2224
+ Series.idxmin : Return index of the minimum element.
2225
+
2226
+ Notes
2227
+ -----
2228
+ This method is the DataFrame version of ``ndarray.argmin``.
2229
+
2230
+ Examples
2231
+ --------
2232
+ Consider a dataset containing food consumption in Argentina.
2233
+
2234
+ >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
2235
+ ... 'co2_emissions': [37.2, 19.66, 1712]},
2236
+ ... index=['Pork', 'Wheat Products', 'Beef'])
2237
+
2238
+ >>> df
2239
+ consumption co2_emissions
2240
+ Pork 10.51 37.20
2241
+ Wheat Products 103.11 19.66
2242
+ Beef 55.48 1712.00
2243
+
2244
+ By default, it returns the index for the minimum value in each column.
2245
+
2246
+ >>> df.idxmin()
2247
+ consumption Pork
2248
+ co2_emissions Wheat Products
2249
+ dtype: object
2250
+
2251
+ To return the index for the minimum value in each row, use ``axis="columns"``.
2252
+
2253
+ >>> df.idxmin(axis="columns")
2254
+ Pork consumption
2255
+ Wheat Products co2_emissions
2256
+ Beef consumption
2257
+ dtype: object
2258
+ """
2259
+ return self._idxmax_idxmin(
2260
+ "idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna
2261
+ )
2262
+
2263
+ boxplot = boxplot_frame_groupby
2264
+
2265
+ def value_counts(
2266
+ self,
2267
+ subset: Sequence[Hashable] | None = None,
2268
+ normalize: bool = False,
2269
+ sort: bool = True,
2270
+ ascending: bool = False,
2271
+ dropna: bool = True,
2272
+ ) -> DataFrame | Series:
2273
+ """
2274
+ Return a Series or DataFrame containing counts of unique rows.
2275
+
2276
+ .. versionadded:: 1.4.0
2277
+
2278
+ Parameters
2279
+ ----------
2280
+ subset : list-like, optional
2281
+ Columns to use when counting unique combinations.
2282
+ normalize : bool, default False
2283
+ Return proportions rather than frequencies.
2284
+ sort : bool, default True
2285
+ Sort by frequencies.
2286
+ ascending : bool, default False
2287
+ Sort in ascending order.
2288
+ dropna : bool, default True
2289
+ Don't include counts of rows that contain NA values.
2290
+
2291
+ Returns
2292
+ -------
2293
+ Series or DataFrame
2294
+ Series if the groupby as_index is True, otherwise DataFrame.
2295
+
2296
+ See Also
2297
+ --------
2298
+ Series.value_counts: Equivalent method on Series.
2299
+ DataFrame.value_counts: Equivalent method on DataFrame.
2300
+ SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.
2301
+
2302
+ Notes
2303
+ -----
2304
+ - If the groupby as_index is True then the returned Series will have a
2305
+ MultiIndex with one level per input column.
2306
+ - If the groupby as_index is False then the returned DataFrame will have an
2307
+ additional column with the value_counts. The column is labelled 'count' or
2308
+ 'proportion', depending on the ``normalize`` parameter.
2309
+
2310
+ By default, rows that contain any NA values are omitted from
2311
+ the result.
2312
+
2313
+ By default, the result will be in descending order so that the
2314
+ first element of each group is the most frequently-occurring row.
2315
+
2316
+ Examples
2317
+ --------
2318
+ >>> df = pd.DataFrame({
2319
+ ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
2320
+ ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
2321
+ ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
2322
+ ... })
2323
+
2324
+ >>> df
2325
+ gender education country
2326
+ 0 male low US
2327
+ 1 male medium FR
2328
+ 2 female high US
2329
+ 3 male low FR
2330
+ 4 female high FR
2331
+ 5 male low FR
2332
+
2333
+ >>> df.groupby('gender').value_counts()
2334
+ gender education country
2335
+ female high FR 1
2336
+ US 1
2337
+ male low FR 2
2338
+ US 1
2339
+ medium FR 1
2340
+ Name: count, dtype: int64
2341
+
2342
+ >>> df.groupby('gender').value_counts(ascending=True)
2343
+ gender education country
2344
+ female high FR 1
2345
+ US 1
2346
+ male low US 1
2347
+ medium FR 1
2348
+ low FR 2
2349
+ Name: count, dtype: int64
2350
+
2351
+ >>> df.groupby('gender').value_counts(normalize=True)
2352
+ gender education country
2353
+ female high FR 0.50
2354
+ US 0.50
2355
+ male low FR 0.50
2356
+ US 0.25
2357
+ medium FR 0.25
2358
+ Name: proportion, dtype: float64
2359
+
2360
+ >>> df.groupby('gender', as_index=False).value_counts()
2361
+ gender education country count
2362
+ 0 female high FR 1
2363
+ 1 female high US 1
2364
+ 2 male low FR 2
2365
+ 3 male low US 1
2366
+ 4 male medium FR 1
2367
+
2368
+ >>> df.groupby('gender', as_index=False).value_counts(normalize=True)
2369
+ gender education country proportion
2370
+ 0 female high FR 0.50
2371
+ 1 female high US 0.50
2372
+ 2 male low FR 0.50
2373
+ 3 male low US 0.25
2374
+ 4 male medium FR 0.25
2375
+ """
2376
+ return self._value_counts(subset, normalize, sort, ascending, dropna)
2377
+
2378
+ def fillna(
2379
+ self,
2380
+ value: Hashable | Mapping | Series | DataFrame | None = None,
2381
+ method: FillnaOptions | None = None,
2382
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2383
+ inplace: bool = False,
2384
+ limit: int | None = None,
2385
+ downcast=lib.no_default,
2386
+ ) -> DataFrame | None:
2387
+ """
2388
+ Fill NA/NaN values using the specified method within groups.
2389
+
2390
+ .. deprecated:: 2.2.0
2391
+ This method is deprecated and will be removed in a future version.
2392
+ Use the :meth:`.DataFrameGroupBy.ffill` or :meth:`.DataFrameGroupBy.bfill`
2393
+ for forward or backward filling instead. If you want to fill with a
2394
+ single value, use :meth:`DataFrame.fillna` instead.
2395
+
2396
+ Parameters
2397
+ ----------
2398
+ value : scalar, dict, Series, or DataFrame
2399
+ Value to use to fill holes (e.g. 0), alternately a
2400
+ dict/Series/DataFrame of values specifying which value to use for
2401
+ each index (for a Series) or column (for a DataFrame). Values not
2402
+ in the dict/Series/DataFrame will not be filled. This value cannot
2403
+ be a list. Users wanting to use the ``value`` argument and not ``method``
2404
+ should prefer :meth:`.DataFrame.fillna` as this
2405
+ will produce the same result and be more performant.
2406
+ method : {{'bfill', 'ffill', None}}, default None
2407
+ Method to use for filling holes. ``'ffill'`` will propagate
2408
+ the last valid observation forward within a group.
2409
+ ``'bfill'`` will use next valid observation to fill the gap.
2410
+ axis : {0 or 'index', 1 or 'columns'}
2411
+ Axis along which to fill missing values. When the :class:`DataFrameGroupBy`
2412
+ ``axis`` argument is ``0``, using ``axis=1`` here will produce
2413
+ the same results as :meth:`.DataFrame.fillna`. When the
2414
+ :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0``
2415
+ or ``axis=1`` here will produce the same results.
2416
+ inplace : bool, default False
2417
+ Broken. Do not set to True.
2418
+ limit : int, default None
2419
+ If method is specified, this is the maximum number of consecutive
2420
+ NaN values to forward/backward fill within a group. In other words,
2421
+ if there is a gap with more than this number of consecutive NaNs,
2422
+ it will only be partially filled. If method is not specified, this is the
2423
+ maximum number of entries along the entire axis where NaNs will be
2424
+ filled. Must be greater than 0 if not None.
2425
+ downcast : dict, default is None
2426
+ A dict of item->dtype of what to downcast if possible,
2427
+ or the string 'infer' which will try to downcast to an appropriate
2428
+ equal type (e.g. float64 to int64 if possible).
2429
+
2430
+ Returns
2431
+ -------
2432
+ DataFrame
2433
+ Object with missing values filled.
2434
+
2435
+ See Also
2436
+ --------
2437
+ ffill : Forward fill values within a group.
2438
+ bfill : Backward fill values within a group.
2439
+
2440
+ Examples
2441
+ --------
2442
+ >>> df = pd.DataFrame(
2443
+ ... {
2444
+ ... "key": [0, 0, 1, 1, 1],
2445
+ ... "A": [np.nan, 2, np.nan, 3, np.nan],
2446
+ ... "B": [2, 3, np.nan, np.nan, np.nan],
2447
+ ... "C": [np.nan, np.nan, 2, np.nan, np.nan],
2448
+ ... }
2449
+ ... )
2450
+ >>> df
2451
+ key A B C
2452
+ 0 0 NaN 2.0 NaN
2453
+ 1 0 2.0 3.0 NaN
2454
+ 2 1 NaN NaN 2.0
2455
+ 3 1 3.0 NaN NaN
2456
+ 4 1 NaN NaN NaN
2457
+
2458
+ Propagate non-null values forward or backward within each group along columns.
2459
+
2460
+ >>> df.groupby("key").fillna(method="ffill")
2461
+ A B C
2462
+ 0 NaN 2.0 NaN
2463
+ 1 2.0 3.0 NaN
2464
+ 2 NaN NaN 2.0
2465
+ 3 3.0 NaN 2.0
2466
+ 4 3.0 NaN 2.0
2467
+
2468
+ >>> df.groupby("key").fillna(method="bfill")
2469
+ A B C
2470
+ 0 2.0 2.0 NaN
2471
+ 1 2.0 3.0 NaN
2472
+ 2 3.0 NaN 2.0
2473
+ 3 3.0 NaN NaN
2474
+ 4 NaN NaN NaN
2475
+
2476
+ Propagate non-null values forward or backward within each group along rows.
2477
+
2478
+ >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T
2479
+ key A B C
2480
+ 0 0.0 0.0 2.0 2.0
2481
+ 1 0.0 2.0 3.0 3.0
2482
+ 2 1.0 1.0 NaN 2.0
2483
+ 3 1.0 3.0 NaN NaN
2484
+ 4 1.0 1.0 NaN NaN
2485
+
2486
+ >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T
2487
+ key A B C
2488
+ 0 0.0 NaN 2.0 NaN
2489
+ 1 0.0 2.0 3.0 NaN
2490
+ 2 1.0 NaN 2.0 2.0
2491
+ 3 1.0 3.0 NaN NaN
2492
+ 4 1.0 NaN NaN NaN
2493
+
2494
+ Only replace the first NaN element within a group along rows.
2495
+
2496
+ >>> df.groupby("key").fillna(method="ffill", limit=1)
2497
+ A B C
2498
+ 0 NaN 2.0 NaN
2499
+ 1 2.0 3.0 NaN
2500
+ 2 NaN NaN 2.0
2501
+ 3 3.0 NaN 2.0
2502
+ 4 3.0 NaN NaN
2503
+ """
2504
+ warnings.warn(
2505
+ f"{type(self).__name__}.fillna is deprecated and "
2506
+ "will be removed in a future version. Use obj.ffill() or obj.bfill() "
2507
+ "for forward or backward filling instead. If you want to fill with a "
2508
+ f"single value, use {type(self.obj).__name__}.fillna instead",
2509
+ FutureWarning,
2510
+ stacklevel=find_stack_level(),
2511
+ )
2512
+
2513
+ result = self._op_via_apply(
2514
+ "fillna",
2515
+ value=value,
2516
+ method=method,
2517
+ axis=axis,
2518
+ inplace=inplace,
2519
+ limit=limit,
2520
+ downcast=downcast,
2521
+ )
2522
+ return result
2523
+
2524
+ def take(
2525
+ self,
2526
+ indices: TakeIndexer,
2527
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2528
+ **kwargs,
2529
+ ) -> DataFrame:
2530
+ """
2531
+ Return the elements in the given *positional* indices in each group.
2532
+
2533
+ This means that we are not indexing according to actual values in
2534
+ the index attribute of the object. We are indexing according to the
2535
+ actual position of the element in the object.
2536
+
2537
+ If a requested index does not exist for some group, this method will raise.
2538
+ To get similar behavior that ignores indices that don't exist, see
2539
+ :meth:`.DataFrameGroupBy.nth`.
2540
+
2541
+ Parameters
2542
+ ----------
2543
+ indices : array-like
2544
+ An array of ints indicating which positions to take.
2545
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
2546
+ The axis on which to select elements. ``0`` means that we are
2547
+ selecting rows, ``1`` means that we are selecting columns.
2548
+
2549
+ .. deprecated:: 2.1.0
2550
+ For axis=1, operate on the underlying object instead. Otherwise
2551
+ the axis keyword is not necessary.
2552
+
2553
+ **kwargs
2554
+ For compatibility with :meth:`numpy.take`. Has no effect on the
2555
+ output.
2556
+
2557
+ Returns
2558
+ -------
2559
+ DataFrame
2560
+ An DataFrame containing the elements taken from each group.
2561
+
2562
+ See Also
2563
+ --------
2564
+ DataFrame.take : Take elements from a Series along an axis.
2565
+ DataFrame.loc : Select a subset of a DataFrame by labels.
2566
+ DataFrame.iloc : Select a subset of a DataFrame by positions.
2567
+ numpy.take : Take elements from an array along an axis.
2568
+
2569
+ Examples
2570
+ --------
2571
+ >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
2572
+ ... ('parrot', 'bird', 24.0),
2573
+ ... ('lion', 'mammal', 80.5),
2574
+ ... ('monkey', 'mammal', np.nan),
2575
+ ... ('rabbit', 'mammal', 15.0)],
2576
+ ... columns=['name', 'class', 'max_speed'],
2577
+ ... index=[4, 3, 2, 1, 0])
2578
+ >>> df
2579
+ name class max_speed
2580
+ 4 falcon bird 389.0
2581
+ 3 parrot bird 24.0
2582
+ 2 lion mammal 80.5
2583
+ 1 monkey mammal NaN
2584
+ 0 rabbit mammal 15.0
2585
+ >>> gb = df.groupby([1, 1, 2, 2, 2])
2586
+
2587
+ Take elements at positions 0 and 1 along the axis 0 (default).
2588
+
2589
+ Note how the indices selected in the result do not correspond to
2590
+ our input indices 0 and 1. That's because we are selecting the 0th
2591
+ and 1st rows, not rows whose indices equal 0 and 1.
2592
+
2593
+ >>> gb.take([0, 1])
2594
+ name class max_speed
2595
+ 1 4 falcon bird 389.0
2596
+ 3 parrot bird 24.0
2597
+ 2 2 lion mammal 80.5
2598
+ 1 monkey mammal NaN
2599
+
2600
+ The order of the specified indices influences the order in the result.
2601
+ Here, the order is swapped from the previous example.
2602
+
2603
+ >>> gb.take([1, 0])
2604
+ name class max_speed
2605
+ 1 3 parrot bird 24.0
2606
+ 4 falcon bird 389.0
2607
+ 2 1 monkey mammal NaN
2608
+ 2 lion mammal 80.5
2609
+
2610
+ Take elements at indices 1 and 2 along the axis 1 (column selection).
2611
+
2612
+ We may take elements using negative integers for positive indices,
2613
+ starting from the end of the object, just like with Python lists.
2614
+
2615
+ >>> gb.take([-1, -2])
2616
+ name class max_speed
2617
+ 1 3 parrot bird 24.0
2618
+ 4 falcon bird 389.0
2619
+ 2 0 rabbit mammal 15.0
2620
+ 1 monkey mammal NaN
2621
+ """
2622
+ result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
2623
+ return result
2624
+
2625
+ def skew(
2626
+ self,
2627
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2628
+ skipna: bool = True,
2629
+ numeric_only: bool = False,
2630
+ **kwargs,
2631
+ ) -> DataFrame:
2632
+ """
2633
+ Return unbiased skew within groups.
2634
+
2635
+ Normalized by N-1.
2636
+
2637
+ Parameters
2638
+ ----------
2639
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
2640
+ Axis for the function to be applied on.
2641
+
2642
+ Specifying ``axis=None`` will apply the aggregation across both axes.
2643
+
2644
+ .. versionadded:: 2.0.0
2645
+
2646
+ .. deprecated:: 2.1.0
2647
+ For axis=1, operate on the underlying object instead. Otherwise
2648
+ the axis keyword is not necessary.
2649
+
2650
+ skipna : bool, default True
2651
+ Exclude NA/null values when computing the result.
2652
+
2653
+ numeric_only : bool, default False
2654
+ Include only float, int, boolean columns.
2655
+
2656
+ **kwargs
2657
+ Additional keyword arguments to be passed to the function.
2658
+
2659
+ Returns
2660
+ -------
2661
+ DataFrame
2662
+
2663
+ See Also
2664
+ --------
2665
+ DataFrame.skew : Return unbiased skew over requested axis.
2666
+
2667
+ Examples
2668
+ --------
2669
+ >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi',
2670
+ ... 'lion', 'monkey', 'rabbit'],
2671
+ ... ['bird', 'bird', 'bird', 'bird',
2672
+ ... 'mammal', 'mammal', 'mammal']]
2673
+ >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class'))
2674
+ >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan,
2675
+ ... 80.5, 21.5, 15.0]},
2676
+ ... index=index)
2677
+ >>> df
2678
+ max_speed
2679
+ name class
2680
+ falcon bird 389.0
2681
+ parrot bird 24.0
2682
+ cockatoo bird 70.0
2683
+ kiwi bird NaN
2684
+ lion mammal 80.5
2685
+ monkey mammal 21.5
2686
+ rabbit mammal 15.0
2687
+ >>> gb = df.groupby(["class"])
2688
+ >>> gb.skew()
2689
+ max_speed
2690
+ class
2691
+ bird 1.628296
2692
+ mammal 1.669046
2693
+ >>> gb.skew(skipna=False)
2694
+ max_speed
2695
+ class
2696
+ bird NaN
2697
+ mammal 1.669046
2698
+ """
2699
+ if axis is lib.no_default:
2700
+ axis = 0
2701
+
2702
+ if axis != 0:
2703
+ result = self._op_via_apply(
2704
+ "skew",
2705
+ axis=axis,
2706
+ skipna=skipna,
2707
+ numeric_only=numeric_only,
2708
+ **kwargs,
2709
+ )
2710
+ return result
2711
+
2712
+ def alt(obj):
2713
+ # This should not be reached since the cython path should raise
2714
+ # TypeError and not NotImplementedError.
2715
+ raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")
2716
+
2717
+ return self._cython_agg_general(
2718
+ "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
2719
+ )
2720
+
2721
+ @property
2722
+ @doc(DataFrame.plot.__doc__)
2723
+ def plot(self) -> GroupByPlot:
2724
+ result = GroupByPlot(self)
2725
+ return result
2726
+
2727
+ @doc(DataFrame.corr.__doc__)
2728
+ def corr(
2729
+ self,
2730
+ method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
2731
+ min_periods: int = 1,
2732
+ numeric_only: bool = False,
2733
+ ) -> DataFrame:
2734
+ result = self._op_via_apply(
2735
+ "corr", method=method, min_periods=min_periods, numeric_only=numeric_only
2736
+ )
2737
+ return result
2738
+
2739
+ @doc(DataFrame.cov.__doc__)
2740
+ def cov(
2741
+ self,
2742
+ min_periods: int | None = None,
2743
+ ddof: int | None = 1,
2744
+ numeric_only: bool = False,
2745
+ ) -> DataFrame:
2746
+ result = self._op_via_apply(
2747
+ "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only
2748
+ )
2749
+ return result
2750
+
2751
+ @doc(DataFrame.hist.__doc__)
2752
+ def hist(
2753
+ self,
2754
+ column: IndexLabel | None = None,
2755
+ by=None,
2756
+ grid: bool = True,
2757
+ xlabelsize: int | None = None,
2758
+ xrot: float | None = None,
2759
+ ylabelsize: int | None = None,
2760
+ yrot: float | None = None,
2761
+ ax=None,
2762
+ sharex: bool = False,
2763
+ sharey: bool = False,
2764
+ figsize: tuple[int, int] | None = None,
2765
+ layout: tuple[int, int] | None = None,
2766
+ bins: int | Sequence[int] = 10,
2767
+ backend: str | None = None,
2768
+ legend: bool = False,
2769
+ **kwargs,
2770
+ ):
2771
+ result = self._op_via_apply(
2772
+ "hist",
2773
+ column=column,
2774
+ by=by,
2775
+ grid=grid,
2776
+ xlabelsize=xlabelsize,
2777
+ xrot=xrot,
2778
+ ylabelsize=ylabelsize,
2779
+ yrot=yrot,
2780
+ ax=ax,
2781
+ sharex=sharex,
2782
+ sharey=sharey,
2783
+ figsize=figsize,
2784
+ layout=layout,
2785
+ bins=bins,
2786
+ backend=backend,
2787
+ legend=legend,
2788
+ **kwargs,
2789
+ )
2790
+ return result
2791
+
2792
+ @property
2793
+ @doc(DataFrame.dtypes.__doc__)
2794
+ def dtypes(self) -> Series:
2795
+ # GH#51045
2796
+ warnings.warn(
2797
+ f"{type(self).__name__}.dtypes is deprecated and will be removed in "
2798
+ "a future version. Check the dtypes on the base object instead",
2799
+ FutureWarning,
2800
+ stacklevel=find_stack_level(),
2801
+ )
2802
+
2803
+ # error: Incompatible return value type (got "DataFrame", expected "Series")
2804
+ return self._python_apply_general( # type: ignore[return-value]
2805
+ lambda df: df.dtypes, self._selected_obj
2806
+ )
2807
+
2808
+ @doc(DataFrame.corrwith.__doc__)
2809
+ def corrwith(
2810
+ self,
2811
+ other: DataFrame | Series,
2812
+ axis: Axis | lib.NoDefault = lib.no_default,
2813
+ drop: bool = False,
2814
+ method: CorrelationMethod = "pearson",
2815
+ numeric_only: bool = False,
2816
+ ) -> DataFrame:
2817
+ result = self._op_via_apply(
2818
+ "corrwith",
2819
+ other=other,
2820
+ axis=axis,
2821
+ drop=drop,
2822
+ method=method,
2823
+ numeric_only=numeric_only,
2824
+ )
2825
+ return result
2826
+
2827
+
2828
+ def _wrap_transform_general_frame(
2829
+ obj: DataFrame, group: DataFrame, res: DataFrame | Series
2830
+ ) -> DataFrame:
2831
+ from pandas import concat
2832
+
2833
+ if isinstance(res, Series):
2834
+ # we need to broadcast across the
2835
+ # other dimension; this will preserve dtypes
2836
+ # GH14457
2837
+ if res.index.is_(obj.index):
2838
+ res_frame = concat([res] * len(group.columns), axis=1)
2839
+ res_frame.columns = group.columns
2840
+ res_frame.index = group.index
2841
+ else:
2842
+ res_frame = obj._constructor(
2843
+ np.tile(res.values, (len(group.index), 1)),
2844
+ columns=group.columns,
2845
+ index=group.index,
2846
+ )
2847
+ assert isinstance(res_frame, DataFrame)
2848
+ return res_frame
2849
+ elif isinstance(res, DataFrame) and not res.index.is_(group.index):
2850
+ return res._align_frame(group)[0]
2851
+ else:
2852
+ return res
vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/groupby.py ADDED
The diff for this file is too large to render. See raw diff
 
vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/grouper.py ADDED
@@ -0,0 +1,1102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provide user facing operators for doing the split part of the
3
+ split-apply-combine paradigm.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ final,
10
+ )
11
+ import warnings
12
+
13
+ import numpy as np
14
+
15
+ from pandas._config import (
16
+ using_copy_on_write,
17
+ warn_copy_on_write,
18
+ )
19
+
20
+ from pandas._libs import lib
21
+ from pandas._libs.tslibs import OutOfBoundsDatetime
22
+ from pandas.errors import InvalidIndexError
23
+ from pandas.util._decorators import cache_readonly
24
+ from pandas.util._exceptions import find_stack_level
25
+
26
+ from pandas.core.dtypes.common import (
27
+ is_list_like,
28
+ is_scalar,
29
+ )
30
+ from pandas.core.dtypes.dtypes import CategoricalDtype
31
+
32
+ from pandas.core import algorithms
33
+ from pandas.core.arrays import (
34
+ Categorical,
35
+ ExtensionArray,
36
+ )
37
+ import pandas.core.common as com
38
+ from pandas.core.frame import DataFrame
39
+ from pandas.core.groupby import ops
40
+ from pandas.core.groupby.categorical import recode_for_groupby
41
+ from pandas.core.indexes.api import (
42
+ CategoricalIndex,
43
+ Index,
44
+ MultiIndex,
45
+ )
46
+ from pandas.core.series import Series
47
+
48
+ from pandas.io.formats.printing import pprint_thing
49
+
50
+ if TYPE_CHECKING:
51
+ from collections.abc import (
52
+ Hashable,
53
+ Iterator,
54
+ )
55
+
56
+ from pandas._typing import (
57
+ ArrayLike,
58
+ Axis,
59
+ NDFrameT,
60
+ npt,
61
+ )
62
+
63
+ from pandas.core.generic import NDFrame
64
+
65
+
66
+ class Grouper:
67
+ """
68
+ A Grouper allows the user to specify a groupby instruction for an object.
69
+
70
+ This specification will select a column via the key parameter, or if the
71
+ level and/or axis parameters are given, a level of the index of the target
72
+ object.
73
+
74
+ If `axis` and/or `level` are passed as keywords to both `Grouper` and
75
+ `groupby`, the values passed to `Grouper` take precedence.
76
+
77
+ Parameters
78
+ ----------
79
+ key : str, defaults to None
80
+ Groupby key, which selects the grouping column of the target.
81
+ level : name/number, defaults to None
82
+ The level for the target index.
83
+ freq : str / frequency object, defaults to None
84
+ This will groupby the specified frequency if the target selection
85
+ (via key or level) is a datetime-like object. For full specification
86
+ of available frequencies, please see `here
87
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
88
+ axis : str, int, defaults to 0
89
+ Number/name of the axis.
90
+ sort : bool, default to False
91
+ Whether to sort the resulting labels.
92
+ closed : {'left' or 'right'}
93
+ Closed end of interval. Only when `freq` parameter is passed.
94
+ label : {'left' or 'right'}
95
+ Interval boundary to use for labeling.
96
+ Only when `freq` parameter is passed.
97
+ convention : {'start', 'end', 'e', 's'}
98
+ If grouper is PeriodIndex and `freq` parameter is passed.
99
+
100
+ origin : Timestamp or str, default 'start_day'
101
+ The timestamp on which to adjust the grouping. The timezone of origin must
102
+ match the timezone of the index.
103
+ If string, must be one of the following:
104
+
105
+ - 'epoch': `origin` is 1970-01-01
106
+ - 'start': `origin` is the first value of the timeseries
107
+ - 'start_day': `origin` is the first day at midnight of the timeseries
108
+
109
+ - 'end': `origin` is the last value of the timeseries
110
+ - 'end_day': `origin` is the ceiling midnight of the last day
111
+
112
+ .. versionadded:: 1.3.0
113
+
114
+ offset : Timedelta or str, default is None
115
+ An offset timedelta added to the origin.
116
+
117
+ dropna : bool, default True
118
+ If True, and if group keys contain NA values, NA values together with
119
+ row/column will be dropped. If False, NA values will also be treated as
120
+ the key in groups.
121
+
122
+ Returns
123
+ -------
124
+ Grouper or pandas.api.typing.TimeGrouper
125
+ A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper
126
+ is returned.
127
+
128
+ Examples
129
+ --------
130
+ ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')``
131
+
132
+ >>> df = pd.DataFrame(
133
+ ... {
134
+ ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
135
+ ... "Speed": [100, 5, 200, 300, 15],
136
+ ... }
137
+ ... )
138
+ >>> df
139
+ Animal Speed
140
+ 0 Falcon 100
141
+ 1 Parrot 5
142
+ 2 Falcon 200
143
+ 3 Falcon 300
144
+ 4 Parrot 15
145
+ >>> df.groupby(pd.Grouper(key="Animal")).mean()
146
+ Speed
147
+ Animal
148
+ Falcon 200.0
149
+ Parrot 10.0
150
+
151
+ Specify a resample operation on the column 'Publish date'
152
+
153
+ >>> df = pd.DataFrame(
154
+ ... {
155
+ ... "Publish date": [
156
+ ... pd.Timestamp("2000-01-02"),
157
+ ... pd.Timestamp("2000-01-02"),
158
+ ... pd.Timestamp("2000-01-09"),
159
+ ... pd.Timestamp("2000-01-16")
160
+ ... ],
161
+ ... "ID": [0, 1, 2, 3],
162
+ ... "Price": [10, 20, 30, 40]
163
+ ... }
164
+ ... )
165
+ >>> df
166
+ Publish date ID Price
167
+ 0 2000-01-02 0 10
168
+ 1 2000-01-02 1 20
169
+ 2 2000-01-09 2 30
170
+ 3 2000-01-16 3 40
171
+ >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
172
+ ID Price
173
+ Publish date
174
+ 2000-01-02 0.5 15.0
175
+ 2000-01-09 2.0 30.0
176
+ 2000-01-16 3.0 40.0
177
+
178
+ If you want to adjust the start of the bins based on a fixed timestamp:
179
+
180
+ >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
181
+ >>> rng = pd.date_range(start, end, freq='7min')
182
+ >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
183
+ >>> ts
184
+ 2000-10-01 23:30:00 0
185
+ 2000-10-01 23:37:00 3
186
+ 2000-10-01 23:44:00 6
187
+ 2000-10-01 23:51:00 9
188
+ 2000-10-01 23:58:00 12
189
+ 2000-10-02 00:05:00 15
190
+ 2000-10-02 00:12:00 18
191
+ 2000-10-02 00:19:00 21
192
+ 2000-10-02 00:26:00 24
193
+ Freq: 7min, dtype: int64
194
+
195
+ >>> ts.groupby(pd.Grouper(freq='17min')).sum()
196
+ 2000-10-01 23:14:00 0
197
+ 2000-10-01 23:31:00 9
198
+ 2000-10-01 23:48:00 21
199
+ 2000-10-02 00:05:00 54
200
+ 2000-10-02 00:22:00 24
201
+ Freq: 17min, dtype: int64
202
+
203
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
204
+ 2000-10-01 23:18:00 0
205
+ 2000-10-01 23:35:00 18
206
+ 2000-10-01 23:52:00 27
207
+ 2000-10-02 00:09:00 39
208
+ 2000-10-02 00:26:00 24
209
+ Freq: 17min, dtype: int64
210
+
211
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
212
+ 2000-10-01 23:24:00 3
213
+ 2000-10-01 23:41:00 15
214
+ 2000-10-01 23:58:00 45
215
+ 2000-10-02 00:15:00 45
216
+ Freq: 17min, dtype: int64
217
+
218
+ If you want to adjust the start of the bins with an `offset` Timedelta, the two
219
+ following lines are equivalent:
220
+
221
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
222
+ 2000-10-01 23:30:00 9
223
+ 2000-10-01 23:47:00 21
224
+ 2000-10-02 00:04:00 54
225
+ 2000-10-02 00:21:00 24
226
+ Freq: 17min, dtype: int64
227
+
228
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
229
+ 2000-10-01 23:30:00 9
230
+ 2000-10-01 23:47:00 21
231
+ 2000-10-02 00:04:00 54
232
+ 2000-10-02 00:21:00 24
233
+ Freq: 17min, dtype: int64
234
+
235
+ To replace the use of the deprecated `base` argument, you can now use `offset`,
236
+ in this example it is equivalent to have `base=2`:
237
+
238
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
239
+ 2000-10-01 23:16:00 0
240
+ 2000-10-01 23:33:00 9
241
+ 2000-10-01 23:50:00 36
242
+ 2000-10-02 00:07:00 39
243
+ 2000-10-02 00:24:00 24
244
+ Freq: 17min, dtype: int64
245
+ """
246
+
247
+ sort: bool
248
+ dropna: bool
249
+ _gpr_index: Index | None
250
+ _grouper: Index | None
251
+
252
+ _attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna")
253
+
254
+ def __new__(cls, *args, **kwargs):
255
+ if kwargs.get("freq") is not None:
256
+ from pandas.core.resample import TimeGrouper
257
+
258
+ cls = TimeGrouper
259
+ return super().__new__(cls)
260
+
261
+ def __init__(
262
+ self,
263
+ key=None,
264
+ level=None,
265
+ freq=None,
266
+ axis: Axis | lib.NoDefault = lib.no_default,
267
+ sort: bool = False,
268
+ dropna: bool = True,
269
+ ) -> None:
270
+ if type(self) is Grouper:
271
+ # i.e. not TimeGrouper
272
+ if axis is not lib.no_default:
273
+ warnings.warn(
274
+ "Grouper axis keyword is deprecated and will be removed in a "
275
+ "future version. To group on axis=1, use obj.T.groupby(...) "
276
+ "instead",
277
+ FutureWarning,
278
+ stacklevel=find_stack_level(),
279
+ )
280
+ else:
281
+ axis = 0
282
+ if axis is lib.no_default:
283
+ axis = 0
284
+
285
+ self.key = key
286
+ self.level = level
287
+ self.freq = freq
288
+ self.axis = axis
289
+ self.sort = sort
290
+ self.dropna = dropna
291
+
292
+ self._grouper_deprecated = None
293
+ self._indexer_deprecated: npt.NDArray[np.intp] | None = None
294
+ self._obj_deprecated = None
295
+ self._gpr_index = None
296
+ self.binner = None
297
+ self._grouper = None
298
+ self._indexer: npt.NDArray[np.intp] | None = None
299
+
300
    def _get_grouper(
        self, obj: NDFrameT, validate: bool = True
    ) -> tuple[ops.BaseGrouper, NDFrameT]:
        """
        Resolve this specification against ``obj`` into a concrete BaseGrouper.

        Parameters
        ----------
        obj : Series or DataFrame
        validate : bool, default True
            if True, validate the grouper

        Returns
        -------
        a tuple of grouper, obj (possibly sorted)
        """
        # _set_grouper may sort/realign obj; use the returned object.
        obj, _, _ = self._set_grouper(obj)
        grouper, _, obj = get_grouper(
            obj,
            [self.key],
            axis=self.axis,
            level=self.level,
            sort=self.sort,
            validate=validate,
            dropna=self.dropna,
        )
        # Without setting this, subsequent lookups to .groups raise
        # error: Incompatible types in assignment (expression has type "BaseGrouper",
        # variable has type "None")
        self._grouper_deprecated = grouper  # type: ignore[assignment]

        return grouper, obj
330
+
331
    def _set_grouper(
        self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
    ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
        """
        given an object and the specifications, setup the internal grouper
        for this particular specification

        Parameters
        ----------
        obj : Series or DataFrame
        sort : bool, default False
            whether the resulting grouper should be sorted
        gpr_index : Index or None, default None

        Returns
        -------
        NDFrame
        Index
        np.ndarray[np.intp] | None
        """
        assert obj is not None

        # key and level are mutually exclusive ways to name the grouping axis.
        if self.key is not None and self.level is not None:
            raise ValueError("The Grouper cannot specify both a key and a level!")

        # Keep self._grouper value before overriding
        if self._grouper is None:
            # TODO: What are we assuming about subsequent calls?
            self._grouper = gpr_index
            self._indexer = self._indexer_deprecated

        # the key must be a valid info item
        if self.key is not None:
            key = self.key
            # The 'on' is already defined
            if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):
                # Sometimes self._grouper will have been resorted while
                # obj has not. In this case there is a mismatch when we
                # call self._grouper.take(obj.index) so we need to undo the sorting
                # before we call _grouper.take.
                assert self._grouper is not None
                if self._indexer is not None:
                    reverse_indexer = self._indexer.argsort()
                    unsorted_ax = self._grouper.take(reverse_indexer)
                    ax = unsorted_ax.take(obj.index)
                else:
                    ax = self._grouper.take(obj.index)
            else:
                if key not in obj._info_axis:
                    raise KeyError(f"The grouper name {key} is not found")
                ax = Index(obj[key], name=key)

        else:
            # Group by the object's own axis, possibly narrowed to one
            # MultiIndex level.
            ax = obj._get_axis(self.axis)
            if self.level is not None:
                level = self.level

                # if a level is given it must be a mi level or
                # equivalent to the axis name
                if isinstance(ax, MultiIndex):
                    level = ax._get_level_number(level)
                    ax = Index(ax._get_level_values(level), name=ax.names[level])

                else:
                    if level not in (0, ax.name):
                        raise ValueError(f"The level {level} is not valid")

        # possibly sort
        indexer: npt.NDArray[np.intp] | None = None
        if (self.sort or sort) and not ax.is_monotonic_increasing:
            # use stable sort to support first, last, nth
            # TODO: why does putting na_position="first" fix datetimelike cases?
            indexer = self._indexer_deprecated = ax.array.argsort(
                kind="mergesort", na_position="first"
            )
            ax = ax.take(indexer)
            obj = obj.take(indexer, axis=self.axis)

        # error: Incompatible types in assignment (expression has type
        # "NDFrameT", variable has type "None")
        self._obj_deprecated = obj  # type: ignore[assignment]
        self._gpr_index = ax
        return obj, ax, indexer
414
+
415
    @final
    @property
    def ax(self) -> Index:
        """
        Deprecated: the grouping axis computed by the last ``_set_grouper`` call.
        """
        warnings.warn(
            f"{type(self).__name__}.ax is deprecated and will be removed in a "
            "future version. Use Resampler.ax instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        index = self._gpr_index
        # Only populated by _set_grouper; accessing earlier is a usage error.
        if index is None:
            raise ValueError("_set_grouper must be called before ax is accessed")
        return index
428
+
429
    @final
    @property
    def indexer(self):
        """
        Deprecated: the sort indexer produced by the last ``_set_grouper`` call
        (None if no sorting was needed).
        """
        warnings.warn(
            f"{type(self).__name__}.indexer is deprecated and will be removed "
            "in a future version. Use Resampler.indexer instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._indexer_deprecated
439
+
440
    @final
    @property
    def obj(self):
        """
        Deprecated: the (possibly resorted) object from the last
        ``_set_grouper`` call.
        """
        # TODO(3.0): enforcing these deprecations on Grouper should close
        # GH#25564, GH#41930
        # NOTE(review): the message recommends GroupBy.indexer — likely a
        # copy/paste from the `indexer` property above; presumably it should
        # point at GroupBy's obj. Confirm against upstream before changing.
        warnings.warn(
            f"{type(self).__name__}.obj is deprecated and will be removed "
            "in a future version. Use GroupBy.indexer instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._obj_deprecated
452
+
453
    @final
    @property
    def grouper(self):
        """
        Deprecated: the BaseGrouper produced by the last ``_get_grouper`` call
        (None if ``_get_grouper`` has not run yet).
        """
        warnings.warn(
            f"{type(self).__name__}.grouper is deprecated and will be removed "
            "in a future version. Use GroupBy.grouper instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._grouper_deprecated
463
+
464
    @final
    @property
    def groups(self):
        """
        Deprecated: mapping of group label -> index labels.

        Requires ``_get_grouper`` to have run; otherwise ``_grouper_deprecated``
        is still None and this raises AttributeError.
        """
        warnings.warn(
            f"{type(self).__name__}.groups is deprecated and will be removed "
            "in a future version. Use GroupBy.groups instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        # error: "None" has no attribute "groups"
        return self._grouper_deprecated.groups  # type: ignore[attr-defined]
475
+
476
+ @final
477
+ def __repr__(self) -> str:
478
+ attrs_list = (
479
+ f"{attr_name}={repr(getattr(self, attr_name))}"
480
+ for attr_name in self._attributes
481
+ if getattr(self, attr_name) is not None
482
+ )
483
+ attrs = ", ".join(attrs_list)
484
+ cls_name = type(self).__name__
485
+ return f"{cls_name}({attrs})"
486
+
487
+
488
@final
class Grouping:
    """
    Holds the grouping information for a single key

    Parameters
    ----------
    index : Index
        The axis being grouped over.
    grouper :
        Raw grouping specification (mapping, Series, array, Grouper, ...).
    obj : DataFrame or Series
    name : Label
    level :
        Index level name or position, if grouping by a level.
    observed : bool, default False
        If we are a Categorical, use the observed values
    in_axis : if the Grouping is a column in self.obj and hence among
        Groupby.exclusions list
    dropna : bool, default True
        Whether to drop NA groups.
    uniques : Array-like, optional
        When specified, will be used for unique values. Enables including empty groups
        in the result for a BinGrouper. Must not contain duplicates.

    Attributes
    ----------
    indices : dict
        Mapping of {group -> index_list}
    codes : ndarray
        Group codes
    group_index : Index or None
        unique groups
    groups : dict
        Mapping of {group -> label_list}
    """

    _codes: npt.NDArray[np.signedinteger] | None = None
    _all_grouper: Categorical | None
    _orig_cats: Index | None
    _index: Index

    def __init__(
        self,
        index: Index,
        grouper=None,
        obj: NDFrame | None = None,
        level=None,
        sort: bool = True,
        observed: bool = False,
        in_axis: bool = False,
        dropna: bool = True,
        uniques: ArrayLike | None = None,
    ) -> None:
        self.level = level
        self._orig_grouper = grouper
        # Normalize dict/Series/list/etc. into an array-like or callable.
        grouping_vector = _convert_grouper(index, grouper)
        self._all_grouper = None
        self._orig_cats = None
        self._index = index
        self._sort = sort
        self.obj = obj
        self._observed = observed
        self.in_axis = in_axis
        self._dropna = dropna
        self._uniques = uniques

        # we have a single grouper which may be a myriad of things,
        # some of which are dependent on the passing in level

        ilevel = self._ilevel
        if ilevel is not None:
            # In extant tests, the new self.grouping_vector matches
            # `index.get_level_values(ilevel)` whenever
            # mapper is None and isinstance(index, MultiIndex)
            if isinstance(index, MultiIndex):
                index_level = index.get_level_values(ilevel)
            else:
                index_level = index

            if grouping_vector is None:
                grouping_vector = index_level
            else:
                # A callable/mapping combined with a level: map the level values.
                mapper = grouping_vector
                grouping_vector = index_level.map(mapper)

        # a passed Grouper like, directly get the grouper in the same way
        # as single grouper groupby, use the group_info to get codes
        elif isinstance(grouping_vector, Grouper):
            # get the new grouper; we already have disambiguated
            # what key/level refer to exactly, don't need to
            # check again as we have by this point converted these
            # to an actual value (rather than a pd.Grouper)
            assert self.obj is not None  # for mypy
            newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False)
            self.obj = newobj

            if isinstance(newgrouper, ops.BinGrouper):
                # TODO: can we unwrap this and get a tighter typing
                # for self.grouping_vector?
                grouping_vector = newgrouper
            else:
                # ops.BaseGrouper
                # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1.
                # If that were to occur, would we be throwing out information?
                # error: Cannot determine type of "grouping_vector"  [has-type]
                ng = newgrouper.groupings[0].grouping_vector  # type: ignore[has-type]
                # use Index instead of ndarray so we can recover the name
                grouping_vector = Index(ng, name=newgrouper.result_index.name)

        elif not isinstance(
            grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
        ):
            # no level passed
            if getattr(grouping_vector, "ndim", 1) != 1:
                t = str(type(grouping_vector))
                raise ValueError(f"Grouper for '{t}' not 1-dimensional")

            # Treat anything else (e.g. a callable) as a mapper over the index.
            grouping_vector = index.map(grouping_vector)

            if not (
                hasattr(grouping_vector, "__len__")
                and len(grouping_vector) == len(index)
            ):
                grper = pprint_thing(grouping_vector)
                errmsg = (
                    "Grouper result violates len(labels) == "
                    f"len(data)\nresult: {grper}"
                )
                raise AssertionError(errmsg)

        if isinstance(grouping_vector, np.ndarray):
            if grouping_vector.dtype.kind in "mM":
                # if we have a date/time-like grouper, make sure that we have
                # Timestamps like
                # TODO 2022-10-08 we only have one test that gets here and
                #  values are already in nanoseconds in that case.
                grouping_vector = Series(grouping_vector).to_numpy()
        elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype):
            # a passed Categorical
            self._orig_cats = grouping_vector.categories
            grouping_vector, self._all_grouper = recode_for_groupby(
                grouping_vector, sort, observed
            )

        self.grouping_vector = grouping_vector

    def __repr__(self) -> str:
        return f"Grouping({self.name})"

    def __iter__(self) -> Iterator:
        return iter(self.indices)

    @cache_readonly
    def _passed_categorical(self) -> bool:
        # True when the caller grouped by an (already) categorical vector.
        dtype = getattr(self.grouping_vector, "dtype", None)
        return isinstance(dtype, CategoricalDtype)

    @cache_readonly
    def name(self) -> Hashable:
        """Best-effort label for this grouping (level name, Series/Index name, or None)."""
        ilevel = self._ilevel
        if ilevel is not None:
            return self._index.names[ilevel]

        if isinstance(self._orig_grouper, (Index, Series)):
            return self._orig_grouper.name

        elif isinstance(self.grouping_vector, ops.BaseGrouper):
            return self.grouping_vector.result_index.name

        elif isinstance(self.grouping_vector, Index):
            return self.grouping_vector.name

        # otherwise we have ndarray or ExtensionArray -> no name
        return None

    @cache_readonly
    def _ilevel(self) -> int | None:
        """
        If necessary, converted index level name to index level position.
        """
        level = self.level
        if level is None:
            return None
        if not isinstance(level, int):
            index = self._index
            if level not in index.names:
                raise AssertionError(f"Level {level} not in index")
            return index.names.index(level)
        return level

    @property
    def ngroups(self) -> int:
        # Number of distinct groups (including unobserved categories).
        return len(self._group_index)

    @cache_readonly
    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
        # we have a list of groupers
        if isinstance(self.grouping_vector, ops.BaseGrouper):
            return self.grouping_vector.indices

        values = Categorical(self.grouping_vector)
        return values._reverse_indexer()

    @property
    def codes(self) -> npt.NDArray[np.signedinteger]:
        # Integer group code per row (-1 for dropped NA).
        return self._codes_and_uniques[0]

    @cache_readonly
    def _group_arraylike(self) -> ArrayLike:
        """
        Analogous to result_index, but holding an ArrayLike to ensure
        we can retain ExtensionDtypes.
        """
        if self._all_grouper is not None:
            # retain dtype for categories, including unobserved ones
            return self._result_index._values

        elif self._passed_categorical:
            return self._group_index._values

        return self._codes_and_uniques[1]

    @property
    def group_arraylike(self) -> ArrayLike:
        """
        Analogous to result_index, but holding an ArrayLike to ensure
        we can retain ExtensionDtypes.

        .. deprecated:: use ``_group_arraylike`` internally.
        """
        warnings.warn(
            "group_arraylike is deprecated and will be removed in a future "
            "version of pandas",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._group_arraylike

    @cache_readonly
    def _result_index(self) -> Index:
        # result_index retains dtype for categories, including unobserved ones,
        # which group_index does not
        if self._all_grouper is not None:
            group_idx = self._group_index
            assert isinstance(group_idx, CategoricalIndex)
            cats = self._orig_cats
            # set_categories is dynamically added
            return group_idx.set_categories(cats)  # type: ignore[attr-defined]
        return self._group_index

    @property
    def result_index(self) -> Index:
        # Deprecated public alias for _result_index.
        warnings.warn(
            "result_index is deprecated and will be removed in a future "
            "version of pandas",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._result_index

    @cache_readonly
    def _group_index(self) -> Index:
        codes, uniques = self._codes_and_uniques
        if not self._dropna and self._passed_categorical:
            assert isinstance(uniques, Categorical)
            if self._sort and (codes == len(uniques)).any():
                # Add NA value on the end when sorting
                uniques = Categorical.from_codes(
                    np.append(uniques.codes, [-1]), uniques.categories, validate=False
                )
            elif len(codes) > 0:
                # Need to determine proper placement of NA value when not sorting
                cat = self.grouping_vector
                na_idx = (cat.codes < 0).argmax()
                if cat.codes[na_idx] < 0:
                    # count number of unique codes that comes before the nan value
                    na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx])
                    new_codes = np.insert(uniques.codes, na_unique_idx, -1)
                    uniques = Categorical.from_codes(
                        new_codes, uniques.categories, validate=False
                    )
        return Index._with_infer(uniques, name=self.name)

    @property
    def group_index(self) -> Index:
        # Deprecated public alias for _group_index.
        warnings.warn(
            "group_index is deprecated and will be removed in a future "
            "version of pandas",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._group_index

    @cache_readonly
    def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
        """Factorize the grouping vector into (codes per row, unique group values)."""
        uniques: ArrayLike
        if self._passed_categorical:
            # we make a CategoricalIndex out of the cat grouper
            # preserving the categories / ordered attributes;
            # doesn't (yet - GH#46909) handle dropna=False
            cat = self.grouping_vector
            categories = cat.categories

            if self._observed:
                ucodes = algorithms.unique1d(cat.codes)
                ucodes = ucodes[ucodes != -1]
                if self._sort:
                    ucodes = np.sort(ucodes)
            else:
                ucodes = np.arange(len(categories))

            uniques = Categorical.from_codes(
                codes=ucodes, categories=categories, ordered=cat.ordered, validate=False
            )

            codes = cat.codes
            if not self._dropna:
                na_mask = codes < 0
                if np.any(na_mask):
                    if self._sort:
                        # Replace NA codes with `largest code + 1`
                        na_code = len(categories)
                        codes = np.where(na_mask, na_code, codes)
                    else:
                        # Insert NA code into the codes based on first appearance
                        # A negative code must exist, no need to check codes[na_idx] < 0
                        na_idx = na_mask.argmax()
                        # count number of unique codes that comes before the nan value
                        na_code = algorithms.nunique_ints(codes[:na_idx])
                        codes = np.where(codes >= na_code, codes + 1, codes)
                        codes = np.where(na_mask, na_code, codes)

            if not self._observed:
                uniques = uniques.reorder_categories(self._orig_cats)

            return codes, uniques

        elif isinstance(self.grouping_vector, ops.BaseGrouper):
            # we have a list of groupers
            codes = self.grouping_vector.codes_info
            uniques = self.grouping_vector.result_index._values
        elif self._uniques is not None:
            # GH#50486 Code grouping_vector using _uniques; allows
            # including uniques that are not present in grouping_vector.
            cat = Categorical(self.grouping_vector, categories=self._uniques)
            codes = cat.codes
            uniques = self._uniques
        else:
            # GH35667, replace dropna=False with use_na_sentinel=False
            # error: Incompatible types in assignment (expression has type "Union[
            # ndarray[Any, Any], Index]", variable has type "Categorical")
            codes, uniques = algorithms.factorize(  # type: ignore[assignment]
                self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna
            )
        return codes, uniques

    @cache_readonly
    def groups(self) -> dict[Hashable, np.ndarray]:
        # Mapping of group label -> subset of the original index labels.
        cats = Categorical.from_codes(self.codes, self._group_index, validate=False)
        return self._index.groupby(cats)
844
+
845
+
846
def get_grouper(
    obj: NDFrameT,
    key=None,
    axis: Axis = 0,
    level=None,
    sort: bool = True,
    observed: bool = False,
    validate: bool = True,
    dropna: bool = True,
) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]:
    """
    Create and return a BaseGrouper, which is an internal
    mapping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers

    Groupers are ultimately index mappings. They can originate as:
    index mappings, keys to columns, functions, or Groupers

    Groupers enable local references to axis,level,sort, while
    the passed in axis, level, and sort are 'global'.

    This routine tries to figure out what the passing in references
    are and then creates a Grouping for each one, combined into
    a BaseGrouper.

    If observed & we have a categorical grouper, only show the observed
    values.

    If validate, then check for key/level overlaps.

    Returns
    -------
    tuple of (BaseGrouper, frozenset of excluded labels, possibly-sorted obj)
    """
    group_axis = obj._get_axis(axis)

    # validate that the passed single level is compatible with the passed
    # axis of the object
    if level is not None:
        # TODO: These if-block and else-block are almost same.
        # MultiIndex instance check is removable, but it seems that there are
        # some processes only for non-MultiIndex in else-block,
        # eg. `obj.index.name != level`. We have to consider carefully whether
        # these are applicable for MultiIndex. Even if these are applicable,
        # we need to check if it makes no side effect to subsequent processes
        # on the outside of this condition.
        # (GH 17621)
        if isinstance(group_axis, MultiIndex):
            if is_list_like(level) and len(level) == 1:
                level = level[0]

            if key is None and is_scalar(level):
                # Get the level values from group_axis
                key = group_axis.get_level_values(level)
                level = None

        else:
            # allow level to be a length-one list-like object
            # (e.g., level=[0])
            # GH 13901
            if is_list_like(level):
                nlevels = len(level)
                if nlevels == 1:
                    level = level[0]
                elif nlevels == 0:
                    raise ValueError("No group keys passed!")
                else:
                    raise ValueError("multiple levels only valid with MultiIndex")

            if isinstance(level, str):
                if obj._get_axis(axis).name != level:
                    raise ValueError(
                        f"level name {level} is not the name "
                        f"of the {obj._get_axis_name(axis)}"
                    )
            elif level > 0 or level < -1:
                raise ValueError("level > 0 or level < -1 only valid with MultiIndex")

            # NOTE: `group_axis` and `group_axis.get_level_values(level)`
            # are same in this section.
            level = None
            key = group_axis

    # a passed-in Grouper, directly convert
    if isinstance(key, Grouper):
        grouper, obj = key._get_grouper(obj, validate=False)
        if key.key is None:
            return grouper, frozenset(), obj
        else:
            return grouper, frozenset({key.key}), obj

    # already have a BaseGrouper, just return it
    elif isinstance(key, ops.BaseGrouper):
        return key, frozenset(), obj

    if not isinstance(key, list):
        keys = [key]
        match_axis_length = False
    else:
        keys = key
        match_axis_length = len(keys) == len(group_axis)

    # what are we after, exactly?
    any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
    any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys)
    any_arraylike = any(
        isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
    )

    # is this an index replacement?
    if (
        not any_callable
        and not any_arraylike
        and not any_groupers
        and match_axis_length
        and level is None
    ):
        if isinstance(obj, DataFrame):
            all_in_columns_index = all(
                g in obj.columns or g in obj.index.names for g in keys
            )
        else:
            assert isinstance(obj, Series)
            all_in_columns_index = all(g in obj.index.names for g in keys)

        if not all_in_columns_index:
            # Not labels: treat the list itself as a single array grouper.
            keys = [com.asarray_tuplesafe(keys)]

    if isinstance(level, (tuple, list)):
        if key is None:
            keys = [None] * len(level)
        levels = level
    else:
        levels = [level] * len(keys)

    groupings: list[Grouping] = []
    exclusions: set[Hashable] = set()

    # if the actual grouper should be obj[key]
    def is_in_axis(key) -> bool:
        if not _is_label_like(key):
            if obj.ndim == 1:
                return False

            # items -> .columns for DataFrame, .index for Series
            items = obj.axes[-1]
            try:
                items.get_loc(key)
            except (KeyError, TypeError, InvalidIndexError):
                # TypeError shows up here if we pass e.g. an Index
                return False

        return True

    # if the grouper is obj[name]
    def is_in_obj(gpr) -> bool:
        if not hasattr(gpr, "name"):
            return False
        if using_copy_on_write() or warn_copy_on_write():
            # For the CoW case, we check the references to determine if the
            # series is part of the object
            try:
                obj_gpr_column = obj[gpr.name]
            except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
                return False
            if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series):
                return gpr._mgr.references_same_values(  # type: ignore[union-attr]
                    obj_gpr_column._mgr, 0  # type: ignore[arg-type]
                )
            return False
        try:
            return gpr is obj[gpr.name]
        except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
            # IndexError reached in e.g. test_skip_group_keys when we pass
            #  lambda here
            # InvalidIndexError raised on key-types inappropriate for index,
            #  e.g. DatetimeIndex.get_loc(tuple())
            # OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex
            #  and gpr.name is month str
            return False

    for gpr, level in zip(keys, levels):
        if is_in_obj(gpr):  # df.groupby(df['name'])
            in_axis = True
            exclusions.add(gpr.name)

        elif is_in_axis(gpr):  # df.groupby('name')
            if obj.ndim != 1 and gpr in obj:
                if validate:
                    obj._check_label_or_level_ambiguity(gpr, axis=axis)
                in_axis, name, gpr = True, gpr, obj[gpr]
                if gpr.ndim != 1:
                    # non-unique columns; raise here to get the name in the
                    # exception message
                    raise ValueError(f"Grouper for '{name}' not 1-dimensional")
                exclusions.add(name)
            elif obj._is_level_reference(gpr, axis=axis):
                in_axis, level, gpr = False, gpr, None
            else:
                raise KeyError(gpr)
        elif isinstance(gpr, Grouper) and gpr.key is not None:
            # Add key to exclusions
            exclusions.add(gpr.key)
            in_axis = True
        else:
            in_axis = False

        # create the Grouping
        # allow us to passing the actual Grouping as the gpr
        ping = (
            Grouping(
                group_axis,
                gpr,
                obj=obj,
                level=level,
                sort=sort,
                observed=observed,
                in_axis=in_axis,
                dropna=dropna,
            )
            if not isinstance(gpr, Grouping)
            else gpr
        )

        groupings.append(ping)

    if len(groupings) == 0 and len(obj):
        raise ValueError("No group keys passed!")
    if len(groupings) == 0:
        # Empty object: synthesize a trivial grouping so BaseGrouper works.
        groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))

    # create the internals grouper
    grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna)
    return grouper, frozenset(exclusions), obj
1078
+
1079
+
1080
+ def _is_label_like(val) -> bool:
1081
+ return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))
1082
+
1083
+
1084
+ def _convert_grouper(axis: Index, grouper):
1085
+ if isinstance(grouper, dict):
1086
+ return grouper.get
1087
+ elif isinstance(grouper, Series):
1088
+ if grouper.index.equals(axis):
1089
+ return grouper._values
1090
+ else:
1091
+ return grouper.reindex(axis)._values
1092
+ elif isinstance(grouper, MultiIndex):
1093
+ return grouper._values
1094
+ elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)):
1095
+ if len(grouper) != len(axis):
1096
+ raise ValueError("Grouper and axis must be same length")
1097
+
1098
+ if isinstance(grouper, (list, tuple)):
1099
+ grouper = com.asarray_tuplesafe(grouper)
1100
+ return grouper
1101
+ else:
1102
+ return grouper
vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/indexing.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Iterable
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Literal,
7
+ cast,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas.util._decorators import (
13
+ cache_readonly,
14
+ doc,
15
+ )
16
+
17
+ from pandas.core.dtypes.common import (
18
+ is_integer,
19
+ is_list_like,
20
+ )
21
+
22
+ if TYPE_CHECKING:
23
+ from pandas._typing import PositionalIndexer
24
+
25
+ from pandas import (
26
+ DataFrame,
27
+ Series,
28
+ )
29
+ from pandas.core.groupby import groupby
30
+
31
+
32
+ class GroupByIndexingMixin:
33
+ """
34
+ Mixin for adding ._positional_selector to GroupBy.
35
+ """
36
+
37
+ @cache_readonly
38
+ def _positional_selector(self) -> GroupByPositionalSelector:
39
+ """
40
+ Return positional selection for each group.
41
+
42
+ ``groupby._positional_selector[i:j]`` is similar to
43
+ ``groupby.apply(lambda x: x.iloc[i:j])``
44
+ but much faster and preserves the original index and order.
45
+
46
+ ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head`
47
+ and :meth:`~GroupBy.tail`. For example:
48
+
49
+ - ``head(5)``
50
+ - ``_positional_selector[5:-5]``
51
+ - ``tail(5)``
52
+
53
+ together return all the rows.
54
+
55
+ Allowed inputs for the index are:
56
+
57
+ - An integer valued iterable, e.g. ``range(2, 4)``.
58
+ - A comma separated list of integers and slices, e.g. ``5``, ``2, 4``, ``2:4``.
59
+
60
+ The output format is the same as :meth:`~GroupBy.head` and
61
+ :meth:`~GroupBy.tail`, namely
62
+ a subset of the ``DataFrame`` or ``Series`` with the index and order preserved.
63
+
64
+ Returns
65
+ -------
66
+ Series
67
+ The filtered subset of the original Series.
68
+ DataFrame
69
+ The filtered subset of the original DataFrame.
70
+
71
+ See Also
72
+ --------
73
+ DataFrame.iloc : Purely integer-location based indexing for selection by
74
+ position.
75
+ GroupBy.head : Return first n rows of each group.
76
+ GroupBy.tail : Return last n rows of each group.
77
+ GroupBy.nth : Take the nth row from each group if n is an int, or a
78
+ subset of rows, if n is a list of ints.
79
+
80
+ Notes
81
+ -----
82
+ - The slice step cannot be negative.
83
+ - If the index specification results in overlaps, the item is not duplicated.
84
+ - If the index specification changes the order of items, then
85
+ they are returned in their original order.
86
+ By contrast, ``DataFrame.iloc`` can change the row order.
87
+ - ``groupby()`` parameters such as as_index and dropna are ignored.
88
+
89
+ The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth`
90
+ with ``as_index=False`` are:
91
+
92
+ - Input to ``_positional_selector`` can include
93
+ one or more slices whereas ``nth``
94
+ just handles an integer or a list of integers.
95
+ - ``_positional_selector`` can accept a slice relative to the
96
+ last row of each group.
97
+ - ``_positional_selector`` does not have an equivalent to the
98
+ ``nth()`` ``dropna`` parameter.
99
+
100
+ Examples
101
+ --------
102
+ >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]],
103
+ ... columns=["A", "B"])
104
+ >>> df.groupby("A")._positional_selector[1:2]
105
+ A B
106
+ 1 a 2
107
+ 4 b 5
108
+
109
+ >>> df.groupby("A")._positional_selector[1, -1]
110
+ A B
111
+ 1 a 2
112
+ 2 a 3
113
+ 4 b 5
114
+ """
115
+ if TYPE_CHECKING:
116
+ # pylint: disable-next=used-before-assignment
117
+ groupby_self = cast(groupby.GroupBy, self)
118
+ else:
119
+ groupby_self = self
120
+
121
+ return GroupByPositionalSelector(groupby_self)
122
+
123
+ def _make_mask_from_positional_indexer(
124
+ self,
125
+ arg: PositionalIndexer | tuple,
126
+ ) -> np.ndarray:
127
+ if is_list_like(arg):
128
+ if all(is_integer(i) for i in cast(Iterable, arg)):
129
+ mask = self._make_mask_from_list(cast(Iterable[int], arg))
130
+ else:
131
+ mask = self._make_mask_from_tuple(cast(tuple, arg))
132
+
133
+ elif isinstance(arg, slice):
134
+ mask = self._make_mask_from_slice(arg)
135
+ elif is_integer(arg):
136
+ mask = self._make_mask_from_int(cast(int, arg))
137
+ else:
138
+ raise TypeError(
139
+ f"Invalid index {type(arg)}. "
140
+ "Must be integer, list-like, slice or a tuple of "
141
+ "integers and slices"
142
+ )
143
+
144
+ if isinstance(mask, bool):
145
+ if mask:
146
+ mask = self._ascending_count >= 0
147
+ else:
148
+ mask = self._ascending_count < 0
149
+
150
+ return cast(np.ndarray, mask)
151
+
152
+ def _make_mask_from_int(self, arg: int) -> np.ndarray:
153
+ if arg >= 0:
154
+ return self._ascending_count == arg
155
+ else:
156
+ return self._descending_count == (-arg - 1)
157
+
158
+ def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray:
159
+ positive = [arg for arg in args if arg >= 0]
160
+ negative = [-arg - 1 for arg in args if arg < 0]
161
+
162
+ mask: bool | np.ndarray = False
163
+
164
+ if positive:
165
+ mask |= np.isin(self._ascending_count, positive)
166
+
167
+ if negative:
168
+ mask |= np.isin(self._descending_count, negative)
169
+
170
+ return mask
171
+
172
+ def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray:
173
+ mask: bool | np.ndarray = False
174
+
175
+ for arg in args:
176
+ if is_integer(arg):
177
+ mask |= self._make_mask_from_int(cast(int, arg))
178
+ elif isinstance(arg, slice):
179
+ mask |= self._make_mask_from_slice(arg)
180
+ else:
181
+ raise ValueError(
182
+ f"Invalid argument {type(arg)}. Should be int or slice."
183
+ )
184
+
185
+ return mask
186
+
187
    def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray:
        """
        Build a boolean mask selecting, within each group, the rows the
        slice ``arg`` would pick positionally.

        ``start``/``stop`` may be negative (counted from the end of each
        group); a negative ``step`` is rejected.
        """
        start = arg.start
        stop = arg.stop
        step = arg.step

        if step is not None and step < 0:
            raise ValueError(f"Invalid step {step}. Must be non-negative")

        # Begin with "select everything" and intersect with each constraint.
        mask: bool | np.ndarray = True

        if step is None:
            step = 1

        if start is None:
            if step > 1:
                mask &= self._ascending_count % step == 0

        elif start >= 0:
            mask &= self._ascending_count >= start

            if step > 1:
                # Positions are taken relative to `start` when stepping.
                mask &= (self._ascending_count - start) % step == 0

        else:
            # Negative start: keep only the last -start rows of each group ...
            mask &= self._descending_count < -start

            # ... and apply the step relative to where the slice begins in
            # each group.  offset_array is each row's offset from that point.
            offset_array = self._descending_count + start + 1
            # limit_array is True for groups shorter than -start, where the
            # slice effectively begins at the first row of the group.
            limit_array = (
                self._ascending_count + self._descending_count + (start + 1)
            ) < 0
            offset_array = np.where(limit_array, self._ascending_count, offset_array)

            mask &= offset_array % step == 0

        if stop is not None:
            if stop >= 0:
                mask &= self._ascending_count < stop
            else:
                mask &= self._descending_count >= -stop

        return mask
228
+
229
    @cache_readonly
    def _ascending_count(self) -> np.ndarray:
        """Each row's position within its group, counting from the front (0-based)."""
        if TYPE_CHECKING:
            # pylint: disable-next=used-before-assignment
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return groupby_self._cumcount_array()
237
+
238
    @cache_readonly
    def _descending_count(self) -> np.ndarray:
        """Each row's position within its group, counting from the back (0-based)."""
        if TYPE_CHECKING:
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return groupby_self._cumcount_array(ascending=False)
246
+
247
+
248
@doc(GroupByIndexingMixin._positional_selector)
class GroupByPositionalSelector:
    def __init__(self, groupby_object: groupby.GroupBy) -> None:
        self.groupby_object = groupby_object

    def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:
        """
        Select by positional index per group.

        Implements GroupBy._positional_selector

        Parameters
        ----------
        arg : PositionalIndexer | tuple
            Allowed values are:
            - int
            - int valued iterable such as list or range
            - slice with step either None or positive
            - tuple of integers and slices

        Returns
        -------
        Series
            The filtered subset of the original groupby Series.
        DataFrame
            The filtered subset of the original groupby DataFrame.

        See Also
        --------
        DataFrame.iloc : Integer-location based indexing for selection by position.
        GroupBy.head : Return first n rows of each group.
        GroupBy.tail : Return last n rows of each group.
        GroupBy._positional_selector : Return positional selection for each group.
        GroupBy.nth : Take the nth row from each group if n is an int, or a
            subset of rows, if n is a list of ints.
        """
        # Delegate both steps to the wrapped groupby object: build the
        # per-group row mask, then apply it to the selected object.
        owner = self.groupby_object
        row_mask = owner._make_mask_from_positional_indexer(arg)
        return owner._mask_selected_obj(row_mask)
286
+
287
+
288
class GroupByNthSelector:
    """
    Dynamically substituted for GroupBy.nth so that the method supports both
    the call form (``gb.nth(2, dropna=...)``) and the index form (``gb.nth[2]``).
    """

    def __init__(self, groupby_object: groupby.GroupBy) -> None:
        self.groupby_object = groupby_object

    def __call__(
        self,
        n: PositionalIndexer | tuple,
        dropna: Literal["any", "all", None] = None,
    ) -> DataFrame | Series:
        # Call form: forwards the optional dropna argument.
        return self.groupby_object._nth(n, dropna)

    def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series:
        # Index form: no dropna handling available.
        return self.groupby_object._nth(n)
vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/ops.py ADDED
@@ -0,0 +1,1208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provide classes to perform the groupby aggregate operations.
3
+
4
+ These are not exposed to the user and provide implementations of the grouping
5
+ operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
6
+ are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import collections
11
+ import functools
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Callable,
15
+ Generic,
16
+ final,
17
+ )
18
+
19
+ import numpy as np
20
+
21
+ from pandas._libs import (
22
+ NaT,
23
+ lib,
24
+ )
25
+ import pandas._libs.groupby as libgroupby
26
+ from pandas._typing import (
27
+ ArrayLike,
28
+ AxisInt,
29
+ NDFrameT,
30
+ Shape,
31
+ npt,
32
+ )
33
+ from pandas.errors import AbstractMethodError
34
+ from pandas.util._decorators import cache_readonly
35
+
36
+ from pandas.core.dtypes.cast import (
37
+ maybe_cast_pointwise_result,
38
+ maybe_downcast_to_dtype,
39
+ )
40
+ from pandas.core.dtypes.common import (
41
+ ensure_float64,
42
+ ensure_int64,
43
+ ensure_platform_int,
44
+ ensure_uint64,
45
+ is_1d_only_ea_dtype,
46
+ )
47
+ from pandas.core.dtypes.missing import (
48
+ isna,
49
+ maybe_fill,
50
+ )
51
+
52
+ from pandas.core.frame import DataFrame
53
+ from pandas.core.groupby import grouper
54
+ from pandas.core.indexes.api import (
55
+ CategoricalIndex,
56
+ Index,
57
+ MultiIndex,
58
+ ensure_index,
59
+ )
60
+ from pandas.core.series import Series
61
+ from pandas.core.sorting import (
62
+ compress_group_index,
63
+ decons_obs_group_ids,
64
+ get_flattened_list,
65
+ get_group_index,
66
+ get_group_index_sorter,
67
+ get_indexer_dict,
68
+ )
69
+
70
+ if TYPE_CHECKING:
71
+ from collections.abc import (
72
+ Hashable,
73
+ Iterator,
74
+ Sequence,
75
+ )
76
+
77
+ from pandas.core.generic import NDFrame
78
+
79
+
80
def check_result_array(obj, dtype) -> None:
    """
    Raise ValueError if an aggregation produced an ndarray instead of a scalar.

    Object dtype is exempt: a genuine reduction over object values may
    legitimately return an ndarray (e.g. test_agg_over_numpy_arrays).
    See test_apply_without_aggregation, test_agg_must_agg.
    """
    if isinstance(obj, np.ndarray) and dtype != object:
        raise ValueError("Must produce aggregated value")
89
+
90
+
91
def extract_result(res):
    """
    Extract the result object, it might be a 0-dim ndarray
    or a len-1 0-dim, or a scalar
    """
    try:
        # Unwrap Series/Index-like objects to their backing array,
        # preserving any ExtensionArray.
        extracted = res._values
    except AttributeError:
        return res
    if extracted.ndim == 1 and len(extracted) == 1:
        # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply
        return extracted[0]
    return extracted
103
+
104
+
105
class WrappedCythonOp:
    """
    Dispatch logic for functions defined in _libs.groupby

    Parameters
    ----------
    kind: str
        Whether the operation is an aggregate or transform.
    how: str
        Operation name, e.g. "mean".
    has_dropped_na: bool
        True precisely when dropna=True and the grouper contains a null value.
    """

    # Functions for which we do _not_ attempt to cast the cython result
    # back to the original dtype.
    cast_blocklist = frozenset(
        ["any", "all", "rank", "count", "size", "idxmin", "idxmax"]
    )

    def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
        self.kind = kind
        self.how = how
        self.has_dropped_na = has_dropped_na

    # Mapping of op name -> cython function name (or a prepared partial
    # when the cython signature needs extra fixed arguments).
    _CYTHON_FUNCTIONS: dict[str, dict] = {
        "aggregate": {
            "any": functools.partial(libgroupby.group_any_all, val_test="any"),
            "all": functools.partial(libgroupby.group_any_all, val_test="all"),
            "sum": "group_sum",
            "prod": "group_prod",
            "idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"),
            "idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"),
            "min": "group_min",
            "max": "group_max",
            "mean": "group_mean",
            "median": "group_median_float64",
            "var": "group_var",
            "std": functools.partial(libgroupby.group_var, name="std"),
            "sem": functools.partial(libgroupby.group_var, name="sem"),
            "skew": "group_skew",
            "first": "group_nth",
            "last": "group_last",
            "ohlc": "group_ohlc",
        },
        "transform": {
            "cumprod": "group_cumprod",
            "cumsum": "group_cumsum",
            "cummin": "group_cummin",
            "cummax": "group_cummax",
            "rank": "group_rank",
        },
    }

    _cython_arity = {"ohlc": 4}  # OHLC produces 4 columns per group

    @classmethod
    def get_kind_from_how(cls, how: str) -> str:
        """Classify ``how`` as an "aggregate" or "transform" operation."""
        if how in cls._CYTHON_FUNCTIONS["aggregate"]:
            return "aggregate"
        return "transform"

    # Note: we make this a classmethod and pass kind+how so that caching
    # works at the class level and not the instance level
    @classmethod
    @functools.cache
    def _get_cython_function(
        cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
    ):
        """
        Resolve the cython function implementing (kind, how) for ``dtype``,
        raising NotImplementedError when no implementation exists.
        """
        dtype_str = dtype.name
        ftype = cls._CYTHON_FUNCTIONS[kind][how]

        # see if there is a fused-type version of function
        # only valid for numeric
        if callable(ftype):
            f = ftype
        else:
            f = getattr(libgroupby, ftype)
        if is_numeric:
            return f
        elif dtype == np.dtype(object):
            if how in ["median", "cumprod"]:
                # no fused types -> no __signatures__
                raise NotImplementedError(
                    f"function is not implemented for this dtype: "
                    f"[how->{how},dtype->{dtype_str}]"
                )
            elif how in ["std", "sem", "idxmin", "idxmax"]:
                # We have a partial object that does not have __signatures__
                return f
            elif how == "skew":
                # _get_cython_vals will convert to float64
                pass
            elif "object" not in f.__signatures__:
                # raise NotImplementedError here rather than TypeError later
                raise NotImplementedError(
                    f"function is not implemented for this dtype: "
                    f"[how->{how},dtype->{dtype_str}]"
                )
            return f
        else:
            raise NotImplementedError(
                "This should not be reached. Please report a bug at "
                "github.com/pandas-dev/pandas/",
                dtype,
            )

    def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
        """
        Cast numeric dtypes to float64 for functions that only support that.

        Parameters
        ----------
        values : np.ndarray

        Returns
        -------
        values : np.ndarray
        """
        how = self.how

        if how in ["median", "std", "sem", "skew"]:
            # median only has a float64 implementation
            # We should only get here with is_numeric, as non-numeric cases
            # should raise in _get_cython_function
            values = ensure_float64(values)

        elif values.dtype.kind in "iu":
            if how in ["var", "mean"] or (
                self.kind == "transform" and self.has_dropped_na
            ):
                # has_dropped_na check need for test_null_group_str_transformer
                # result may still include NaN, so we have to cast
                values = ensure_float64(values)

            elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]:
                # Avoid overflow during group op
                if values.dtype.kind == "i":
                    values = ensure_int64(values)
                else:
                    values = ensure_uint64(values)

        return values

    def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
        """Shape of the cython output buffer for ``values`` over ``ngroups``."""
        how = self.how
        kind = self.kind

        arity = self._cython_arity.get(how, 1)

        out_shape: Shape
        if how == "ohlc":
            out_shape = (ngroups, arity)
        elif arity > 1:
            raise NotImplementedError(
                "arity of more than 1 is not supported for the 'how' argument"
            )
        elif kind == "transform":
            # transforms are length-preserving
            out_shape = values.shape
        else:
            out_shape = (ngroups,) + values.shape[1:]
        return out_shape

    def _get_out_dtype(self, dtype: np.dtype) -> np.dtype:
        """Dtype of the cython output buffer for input ``dtype``."""
        how = self.how

        if how == "rank":
            out_dtype = "float64"
        elif how in ["idxmin", "idxmax"]:
            # The Cython implementation only produces the row number; we'll take
            # from the index using this in post processing
            out_dtype = "intp"
        else:
            if dtype.kind in "iufcb":
                out_dtype = f"{dtype.kind}{dtype.itemsize}"
            else:
                out_dtype = "object"
        return np.dtype(out_dtype)

    def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
        """
        Get the desired dtype of a result based on the
        input dtype and how it was computed.

        Parameters
        ----------
        dtype : np.dtype

        Returns
        -------
        np.dtype
            The desired dtype of the result.
        """
        how = self.how

        # NOTE: "sum" was previously listed twice in this membership check;
        # the duplicate has been removed (behavior unchanged).
        if how in ["sum", "cumsum", "prod", "cumprod"]:
            if dtype == np.dtype(bool):
                return np.dtype(np.int64)
        elif how in ["mean", "median", "var", "std", "sem"]:
            if dtype.kind in "fc":
                return dtype
            elif dtype.kind in "iub":
                return np.dtype(np.float64)
        return dtype

    @final
    def _cython_op_ndim_compat(
        self,
        values: np.ndarray,
        *,
        min_count: int,
        ngroups: int,
        comp_ids: np.ndarray,
        mask: npt.NDArray[np.bool_] | None = None,
        result_mask: npt.NDArray[np.bool_] | None = None,
        **kwargs,
    ) -> np.ndarray:
        """Dispatch to _call_cython_op, expanding 1D inputs to 2D and back."""
        if values.ndim == 1:
            # expand to 2d, dispatch, then squeeze if appropriate
            values2d = values[None, :]
            if mask is not None:
                mask = mask[None, :]
            if result_mask is not None:
                result_mask = result_mask[None, :]
            res = self._call_cython_op(
                values2d,
                min_count=min_count,
                ngroups=ngroups,
                comp_ids=comp_ids,
                mask=mask,
                result_mask=result_mask,
                **kwargs,
            )
            if res.shape[0] == 1:
                return res[0]

            # otherwise we have OHLC
            return res.T

        return self._call_cython_op(
            values,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=comp_ids,
            mask=mask,
            result_mask=result_mask,
            **kwargs,
        )

    @final
    def _call_cython_op(
        self,
        values: np.ndarray,  # np.ndarray[ndim=2]
        *,
        min_count: int,
        ngroups: int,
        comp_ids: np.ndarray,
        mask: npt.NDArray[np.bool_] | None,
        result_mask: npt.NDArray[np.bool_] | None,
        **kwargs,
    ) -> np.ndarray:  # np.ndarray[ndim=2]
        """
        Prepare buffers, invoke the resolved cython function, and cast the
        result back toward the original dtype where appropriate.
        """
        orig_values = values

        dtype = values.dtype
        is_numeric = dtype.kind in "iufcb"

        is_datetimelike = dtype.kind in "mM"

        if is_datetimelike:
            # operate on the int64 view; restored to dt64/td64 below
            values = values.view("int64")
            is_numeric = True
        elif dtype.kind == "b":
            values = values.view("uint8")
        if values.dtype == "float16":
            # no cython float16 kernels; upcast
            values = values.astype(np.float32)

        if self.how in ["any", "all"]:
            if mask is None:
                mask = isna(values)
            if dtype == object:
                if kwargs["skipna"]:
                    # GH#37501: don't raise on pd.NA when skipna=True
                    if mask.any():
                        # mask on original values computed separately
                        values = values.copy()
                        values[mask] = True
            values = values.astype(bool, copy=False).view(np.int8)
            is_numeric = True

        values = values.T
        if mask is not None:
            mask = mask.T
            if result_mask is not None:
                result_mask = result_mask.T

        out_shape = self._get_output_shape(ngroups, values)
        func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)
        values = self._get_cython_vals(values)
        out_dtype = self._get_out_dtype(values.dtype)

        result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
        if self.kind == "aggregate":
            counts = np.zeros(ngroups, dtype=np.int64)
            if self.how in [
                "idxmin",
                "idxmax",
                "min",
                "max",
                "mean",
                "last",
                "first",
                "sum",
            ]:
                func(
                    out=result,
                    counts=counts,
                    values=values,
                    labels=comp_ids,
                    min_count=min_count,
                    mask=mask,
                    result_mask=result_mask,
                    is_datetimelike=is_datetimelike,
                    **kwargs,
                )
            elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]:
                if self.how in ["std", "sem"]:
                    kwargs["is_datetimelike"] = is_datetimelike
                func(
                    result,
                    counts,
                    values,
                    comp_ids,
                    min_count=min_count,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
            elif self.how in ["any", "all"]:
                func(
                    out=result,
                    values=values,
                    labels=comp_ids,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
                result = result.astype(bool, copy=False)
            elif self.how in ["skew"]:
                func(
                    out=result,
                    counts=counts,
                    values=values,
                    labels=comp_ids,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
                if dtype == object:
                    result = result.astype(object)

            else:
                raise NotImplementedError(f"{self.how} is not implemented")
        else:
            # TODO: min_count
            if self.how != "rank":
                # TODO: should rank take result_mask?
                kwargs["result_mask"] = result_mask
            func(
                out=result,
                values=values,
                labels=comp_ids,
                ngroups=ngroups,
                is_datetimelike=is_datetimelike,
                mask=mask,
                **kwargs,
            )

        if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]:
            # i.e. counts is defined. Locations where count<min_count
            # need to have the result set to np.nan, which may require casting,
            # see GH#40767. For idxmin/idxmax is handled specially via post-processing
            if result.dtype.kind in "iu" and not is_datetimelike:
                # if the op keeps the int dtypes, we have to use 0
                cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count)
                empty_groups = counts < cutoff
                if empty_groups.any():
                    if result_mask is not None:
                        assert result_mask[empty_groups].all()
                    else:
                        # Note: this conversion could be lossy, see GH#40767
                        result = result.astype("float64")
                        result[empty_groups] = np.nan

        result = result.T

        if self.how not in self.cast_blocklist:
            # e.g. if we are int64 and need to restore to datetime64/timedelta64
            # "rank" is the only member of cast_blocklist we get here
            # Casting only needed for float16, bool, datetimelike,
            # and self.how in ["sum", "prod", "ohlc", "cumprod"]
            res_dtype = self._get_result_dtype(orig_values.dtype)
            op_result = maybe_downcast_to_dtype(result, res_dtype)
        else:
            op_result = result

        return op_result

    @final
    def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None:
        """Assert that ``axis`` is consistent with the dimensionality of ``values``."""
        if values.ndim > 2:
            raise NotImplementedError("number of dimensions is currently limited to 2")
        if values.ndim == 2:
            assert axis == 1, axis
        elif not is_1d_only_ea_dtype(values.dtype):
            # Note: it is *not* the case that axis is always 0 for 1-dim values,
            # as we can have 1D ExtensionArrays that we need to treat as 2D
            assert axis == 0

    @final
    def cython_operation(
        self,
        *,
        values: ArrayLike,
        axis: AxisInt,
        min_count: int = -1,
        comp_ids: np.ndarray,
        ngroups: int,
        **kwargs,
    ) -> ArrayLike:
        """
        Call our cython function, with appropriate pre- and post- processing.
        """
        self._validate_axis(axis, values)

        if not isinstance(values, np.ndarray):
            # i.e. ExtensionArray: it implements its own groupby dispatch
            return values._groupby_op(
                how=self.how,
                has_dropped_na=self.has_dropped_na,
                min_count=min_count,
                ngroups=ngroups,
                ids=comp_ids,
                **kwargs,
            )

        return self._cython_op_ndim_compat(
            values,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=comp_ids,
            mask=None,
            **kwargs,
        )
557
+ )
558
+
559
+
560
+ class BaseGrouper:
561
+ """
562
+ This is an internal Grouper class, which actually holds
563
+ the generated groups
564
+
565
+ Parameters
566
+ ----------
567
+ axis : Index
568
+ groupings : Sequence[Grouping]
569
+ all the grouping instances to handle in this grouper
570
+ for example for grouper list to groupby, need to pass the list
571
+ sort : bool, default True
572
+ whether this grouper will give sorted result or not
573
+
574
+ """
575
+
576
+ axis: Index
577
+
578
    def __init__(
        self,
        axis: Index,
        groupings: Sequence[grouper.Grouping],
        sort: bool = True,
        dropna: bool = True,
    ) -> None:
        assert isinstance(axis, Index), axis

        self.axis = axis
        # Copy into a list so later mutation of the caller's sequence
        # cannot affect this grouper.
        self._groupings: list[grouper.Grouping] = list(groupings)
        self._sort = sort
        self.dropna = dropna
591
+
592
    @property
    def groupings(self) -> list[grouper.Grouping]:
        """The Grouping instances handled by this grouper."""
        return self._groupings
595
+
596
    @property
    def shape(self) -> Shape:
        """Number of (possibly unobserved) groups along each grouping level."""
        return tuple(ping.ngroups for ping in self.groupings)
599
+
600
    def __iter__(self) -> Iterator[Hashable]:
        # Iterate over group names (the keys of the indices mapping).
        return iter(self.indices)
602
+
603
    @property
    def nkeys(self) -> int:
        """Number of grouping keys (levels)."""
        return len(self.groupings)
606
+
607
    def get_iterator(
        self, data: NDFrameT, axis: AxisInt = 0
    ) -> Iterator[tuple[Hashable, NDFrameT]]:
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        splitter = self._get_splitter(data, axis=axis)
        keys = self.group_keys_seq
        # keys and splitter are aligned: both follow group order.
        yield from zip(keys, splitter)
621
+
622
    @final
    def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
        """
        Returns
        -------
        Generator yielding subsetted objects
        """
        ids, _, ngroups = self.group_info
        # NOTE(review): _sorted_ids / _sort_idx are attributes defined
        # elsewhere on this class (not visible here); the splitter uses them
        # to slice `data` in group order without re-sorting per group.
        return _get_splitter(
            data,
            ids,
            ngroups,
            sorted_ids=self._sorted_ids,
            sort_idx=self._sort_idx,
            axis=axis,
        )
638
+
639
    @final
    @cache_readonly
    def group_keys_seq(self):
        """Sequence of group keys; flattened tuples when grouping by multiple keys."""
        if len(self.groupings) == 1:
            return self.levels[0]
        else:
            ids, _, ngroups = self.group_info

            # provide "flattened" iterator for multi-group setting
            return get_flattened_list(ids, ngroups, self.levels, self.codes)
649
+
650
    @cache_readonly
    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
        """dict {group name -> group indices}"""
        if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
            # This shows unused categories in indices GH#38642
            return self.groupings[0].indices
        # General case: build the indexer per combination of level codes.
        codes_list = [ping.codes for ping in self.groupings]
        keys = [ping._group_index for ping in self.groupings]
        return get_indexer_dict(codes_list, keys)
659
+
660
    @final
    def result_ilocs(self) -> npt.NDArray[np.intp]:
        """
        Get the original integer locations of result_index in the input.
        """
        # Original indices are where group_index would go via sorting.
        # But when dropna is true, we need to remove null values while accounting for
        # any gaps that then occur because of them.
        group_index = get_group_index(
            self.codes, self.shape, sort=self._sort, xnull=True
        )
        group_index, _ = compress_group_index(group_index, sort=self._sort)

        if self.has_dropped_na:
            # -1 codes mark rows with a dropped (null) group key.
            mask = np.where(group_index >= 0)
            # Count how many gaps are caused by previous null values for each position
            null_gaps = np.cumsum(group_index == -1)[mask]
            group_index = group_index[mask]

        result = get_group_index_sorter(group_index, self.ngroups)

        if self.has_dropped_na:
            # Shift by the number of prior null gaps
            result += np.take(null_gaps, result)

        return result
686
+
687
    @final
    @property
    def codes(self) -> list[npt.NDArray[np.signedinteger]]:
        """Integer codes (one array per grouping level)."""
        return [ping.codes for ping in self.groupings]
691
+
692
    @property
    def levels(self) -> list[Index]:
        """Group-index (unique values) per grouping level."""
        return [ping._group_index for ping in self.groupings]
695
+
696
    @property
    def names(self) -> list[Hashable]:
        """Name of each grouping level."""
        return [ping.name for ping in self.groupings]
699
+
700
    @final
    def size(self) -> Series:
        """
        Compute group sizes.
        """
        ids, _, ngroups = self.group_info
        out: np.ndarray | list
        if ngroups:
            # ids == -1 marks rows whose (null) key was dropped; exclude
            # them from the per-group counts.
            out = np.bincount(ids[ids != -1], minlength=ngroups)
        else:
            out = []
        return Series(out, index=self.result_index, dtype="int64", copy=False)
712
+
713
    @cache_readonly
    def groups(self) -> dict[Hashable, np.ndarray]:
        """dict {group name -> group labels}"""
        if len(self.groupings) == 1:
            return self.groupings[0].groups
        else:
            # Multiple keys: group the axis by a MultiIndex built from the
            # underlying grouping vectors.
            to_groupby = []
            for ping in self.groupings:
                gv = ping.grouping_vector
                if not isinstance(gv, BaseGrouper):
                    to_groupby.append(gv)
                else:
                    # Unwrap a nested grouper to its single grouping vector.
                    to_groupby.append(gv.groupings[0].grouping_vector)
            index = MultiIndex.from_arrays(to_groupby)
            return self.axis.groupby(index)
728
+
729
    @final
    @cache_readonly
    def is_monotonic(self) -> bool:
        """Whether the group orderings are monotonic (non-decreasing comp ids)."""
        # return if my group orderings are monotonic
        return Index(self.group_info[0]).is_monotonic_increasing
734
+
735
    @final
    @cache_readonly
    def has_dropped_na(self) -> bool:
        """
        Whether grouper has null value(s) that are dropped.
        """
        # -1 in the comp ids marks a dropped null key.
        return bool((self.group_info[0] < 0).any())
742
+
743
    @cache_readonly
    def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
        """Return (comp_ids, obs_group_ids, ngroups) describing the grouping."""
        comp_ids, obs_group_ids = self._get_compressed_codes()

        ngroups = len(obs_group_ids)
        comp_ids = ensure_platform_int(comp_ids)

        return comp_ids, obs_group_ids, ngroups
751
+
752
    @cache_readonly
    def codes_info(self) -> npt.NDArray[np.intp]:
        """Group codes aligned with the original (ungrouped) axis."""
        # return the codes of items in original grouped axis
        ids, _, _ = self.group_info
        return ids
757
+
758
    @final
    def _get_compressed_codes(
        self,
    ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]:
        """Return (comp_ids, obs_group_ids) with unobserved groups compressed out."""
        # The first returned ndarray may have any signed integer dtype
        if len(self.groupings) > 1:
            group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
            return compress_group_index(group_index, sort=self._sort)
            # FIXME: compress_group_index's second return value is int64, not intp

        # Single grouping: its codes are already compressed.
        ping = self.groupings[0]
        return ping.codes, np.arange(len(ping._group_index), dtype=np.intp)
770
+
771
    @final
    @cache_readonly
    def ngroups(self) -> int:
        """Number of groups appearing in the result."""
        return len(self.result_index)
775
+
776
    @property
    def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
        """Per-level codes reconstructed to cover only the observed groups."""
        codes = self.codes
        ids, obs_ids, _ = self.group_info
        return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
781
+
782
    @cache_readonly
    def result_index(self) -> Index:
        """Index of the grouped result: plain Index or MultiIndex per nkeys."""
        if len(self.groupings) == 1:
            return self.groupings[0]._result_index.rename(self.names[0])

        codes = self.reconstructed_codes
        levels = [ping._result_index for ping in self.groupings]
        return MultiIndex(
            levels=levels, codes=codes, verify_integrity=False, names=self.names
        )
792
+
793
    @final
    def get_group_levels(self) -> list[ArrayLike]:
        """Materialize each grouping level's values aligned with the result index."""
        # Note: only called from _insert_inaxis_grouper, which
        # is only called for BaseGrouper, never for BinGrouper
        if len(self.groupings) == 1:
            return [self.groupings[0]._group_arraylike]

        name_list = []
        for ping, codes in zip(self.groupings, self.reconstructed_codes):
            codes = ensure_platform_int(codes)
            levels = ping._group_arraylike.take(codes)

            name_list.append(levels)

        return name_list
808
+
809
+ # ------------------------------------------------------------
810
+ # Aggregation functions
811
+
812
    @final
    def _cython_operation(
        self,
        kind: str,
        values,
        how: str,
        axis: AxisInt,
        min_count: int = -1,
        **kwargs,
    ) -> ArrayLike:
        """
        Returns the values of a cython operation.
        """
        assert kind in ["transform", "aggregate"]

        # WrappedCythonOp handles kernel dispatch and dtype pre/post-processing.
        cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na)

        ids, _, _ = self.group_info
        ngroups = self.ngroups
        return cy_op.cython_operation(
            values=values,
            axis=axis,
            min_count=min_count,
            comp_ids=ids,
            ngroups=ngroups,
            **kwargs,
        )
839
+
840
    @final
    def agg_series(
        self, obj: Series, func: Callable, preserve_dtype: bool = False
    ) -> ArrayLike:
        """
        Apply ``func`` to each group of ``obj`` and box the results.

        Parameters
        ----------
        obj : Series
        func : function taking a Series and returning a scalar-like
        preserve_dtype : bool
            Whether the aggregation is known to be dtype-preserving.

        Returns
        -------
        np.ndarray or ExtensionArray
        """

        if not isinstance(obj._values, np.ndarray):
            # we can preserve a little bit more aggressively with EA dtype
            # because maybe_cast_pointwise_result will do a try/except
            # with _from_sequence. NB we are assuming here that _from_sequence
            # is sufficiently strict that it casts appropriately.
            preserve_dtype = True

        result = self._aggregate_series_pure_python(obj, func)

        # try_float=False: keep object dtype rather than guessing floats
        npvalues = lib.maybe_convert_objects(result, try_float=False)
        if preserve_dtype:
            out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
        else:
            out = npvalues
        return out
873
    @final
    def _aggregate_series_pure_python(
        self, obj: Series, func: Callable
    ) -> npt.NDArray[np.object_]:
        """
        Apply ``func`` to each group of ``obj`` in pure Python, returning
        an object-dtype array with one entry per group.
        """
        _, _, ngroups = self.group_info

        result = np.empty(ngroups, dtype="O")
        initialized = False

        splitter = self._get_splitter(obj, axis=0)

        for i, group in enumerate(splitter):
            res = func(group)
            res = extract_result(res)

            if not initialized:
                # We only do this validation on the first iteration
                check_result_array(res, group.dtype)
                initialized = True

            result[i] = res

        return result
897
    @final
    def apply_groupwise(
        self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0
    ) -> tuple[list, bool]:
        """
        Apply ``f`` to each group of ``data``.

        Returns
        -------
        tuple
            (list of per-group results, bool indicating whether any result
            had axes differing from its input group, i.e. "mutated").
        """
        mutated = False
        splitter = self._get_splitter(data, axis=axis)
        group_keys = self.group_keys_seq
        result_values = []

        # This calls DataSplitter.__iter__
        zipped = zip(group_keys, splitter)

        for key, group in zipped:
            # Pinning name is needed for
            # test_group_apply_once_per_group,
            # test_inconsistent_return_type, test_set_group_name,
            # test_group_name_available_in_inference_pass,
            # test_groupby_multi_timezone
            object.__setattr__(group, "name", key)

            # group might be modified
            group_axes = group.axes
            res = f(group)
            if not mutated and not _is_indexed_like(res, group_axes, axis):
                mutated = True
            result_values.append(res)
        # getattr pattern for __name__ is needed for functools.partial objects
        if len(group_keys) == 0 and getattr(f, "__name__", None) in [
            "skew",
            "sum",
            "prod",
        ]:
            # If group_keys is empty, then no function calls have been made,
            # so we will not have raised even if this is an invalid dtype.
            # So do one dummy call here to raise appropriate TypeError.
            f(data.iloc[:0])

        return result_values, mutated
936
+ # ------------------------------------------------------------
937
+ # Methods for sorting subsets of our GroupBy's object
938
+
939
    @final
    @cache_readonly
    def _sort_idx(self) -> npt.NDArray[np.intp]:
        """Indexer that sorts the data by group id (counting sort)."""
        # Counting sort indexer
        ids, _, ngroups = self.group_info
        return get_group_index_sorter(ids, ngroups)
946
    @final
    @cache_readonly
    def _sorted_ids(self) -> npt.NDArray[np.intp]:
        """Group ids reordered by ``_sort_idx`` (i.e. grouped contiguously)."""
        ids, _, _ = self.group_info
        return ids.take(self._sort_idx)
952
+
953
class BinGrouper(BaseGrouper):
    """
    This is an internal Grouper class used for binned (e.g. resample) groupby.

    Parameters
    ----------
    bins : the split index of binlabels to group the item of axis
    binlabels : the label list
    indexer : np.ndarray[np.intp], optional
        the indexer created by Grouper
        some groupers (TimeGrouper) will sort its axis and its
        group_info is also sorted, so need the indexer to reorder

    Examples
    --------
    bins: [2, 4, 6, 8, 10]
    binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
        '2005-01-05', '2005-01-07', '2005-01-09'],
        dtype='datetime64[ns]', freq='2D')

    the group_info, which contains the label of each item in grouped
    axis, the index of label in label list, group number, is

    (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)

    means that, the grouped axis has 10 items, can be grouped into 5
    labels, the first and second items belong to the first label, the
    third and forth items belong to the second label, and so on

    """

    # bins[i] is the exclusive end position of bin i in the grouped axis
    bins: npt.NDArray[np.int64]
    binlabels: Index

    def __init__(
        self,
        bins,
        binlabels,
        indexer=None,
    ) -> None:
        self.bins = ensure_int64(bins)
        self.binlabels = ensure_index(binlabels)
        self.indexer = indexer

        # These lengths must match, otherwise we could call agg_series
        # with empty self.bins, which would raise later.
        assert len(self.binlabels) == len(self.bins)

    @cache_readonly
    def groups(self):
        """dict {group name -> group labels}"""
        # this is mainly for compat
        # GH 3881
        result = {
            key: value
            for key, value in zip(self.binlabels, self.bins)
            if key is not NaT
        }
        return result

    @property
    def nkeys(self) -> int:
        # still matches len(self.groupings), but we can hard-code
        return 1

    @cache_readonly
    def codes_info(self) -> npt.NDArray[np.intp]:
        # return the codes of items in original grouped axis
        ids, _, _ = self.group_info
        if self.indexer is not None:
            # Undo the TimeGrouper's sort so codes line up with the
            # caller's original axis order.
            sorter = np.lexsort((ids, self.indexer))
            ids = ids[sorter]
        return ids

    def get_iterator(self, data: NDFrame, axis: AxisInt = 0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        if axis == 0:
            slicer = lambda start, edge: data.iloc[start:edge]
        else:
            slicer = lambda start, edge: data.iloc[:, start:edge]

        length = len(data.axes[axis])

        start = 0
        for edge, label in zip(self.bins, self.binlabels):
            if label is not NaT:
                yield label, slicer(start, edge)
            start = edge

        # Trailing rows past the last bin edge go to the final label.
        if start < length:
            yield self.binlabels[-1], slicer(start, None)

    @cache_readonly
    def indices(self):
        # dict {label -> list of positional indices in the grouped axis};
        # empty bins (i == bin) produce no entry.
        indices = collections.defaultdict(list)

        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            if i < bin:
                if label is not NaT:
                    indices[label] = list(range(i, bin))
                i = bin
        return indices

    @cache_readonly
    def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
        ngroups = self.ngroups
        obs_group_ids = np.arange(ngroups, dtype=np.intp)
        # Per-bin sizes, derived from consecutive differences of bin edges.
        rep = np.diff(np.r_[0, self.bins])

        rep = ensure_platform_int(rep)
        if ngroups == len(self.bins):
            comp_ids = np.repeat(np.arange(ngroups), rep)
        else:
            # First "bin" is a NaT bucket dropped from result_index;
            # mark its rows with -1.
            comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)

        return (
            ensure_platform_int(comp_ids),
            obs_group_ids,
            ngroups,
        )

    @cache_readonly
    def reconstructed_codes(self) -> list[np.ndarray]:
        # get unique result indices, and prepend 0 as groupby starts from the first
        return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]

    @cache_readonly
    def result_index(self) -> Index:
        # A leading NaT label is excluded from the result.
        if len(self.binlabels) != 0 and isna(self.binlabels[0]):
            return self.binlabels[1:]

        return self.binlabels

    @property
    def levels(self) -> list[Index]:
        return [self.binlabels]

    @property
    def names(self) -> list[Hashable]:
        return [self.binlabels.name]

    @property
    def groupings(self) -> list[grouper.Grouping]:
        # Synthesize a single Grouping for API compatibility with BaseGrouper.
        lev = self.binlabels
        codes = self.group_info[0]
        labels = lev.take(codes)
        ping = grouper.Grouping(
            labels, labels, in_axis=False, level=None, uniques=lev._values
        )
        return [ping]
1112
+
1113
def _is_indexed_like(obj, axes, axis: AxisInt) -> bool:
    """
    Return True if ``obj`` is a Series/DataFrame whose ``axis``-th axis
    equals the corresponding entry of ``axes``; False for anything else.
    """
    if isinstance(obj, DataFrame):
        return obj.axes[axis].equals(axes[axis])
    if isinstance(obj, Series):
        # A Series compared against >1 reference axes can never match.
        if len(axes) > 1:
            return False
        return obj.axes[axis].equals(axes[axis])
    # Scalars and other objects are never "indexed like" the reference.
    return False
1123
+
1124
+ # ----------------------------------------------------------------------
1125
+ # Splitting / application
1126
+
1127
+
1128
class DataSplitter(Generic[NDFrameT]):
    """
    Base class that iterates over contiguous per-group chunks of ``data``.

    The data is sorted once by group id; ``__iter__`` then yields one
    ``_chop``-ped slice per group.
    """

    def __init__(
        self,
        data: NDFrameT,
        labels: npt.NDArray[np.intp],
        ngroups: int,
        *,
        sort_idx: npt.NDArray[np.intp],
        sorted_ids: npt.NDArray[np.intp],
        axis: AxisInt = 0,
    ) -> None:
        self.data = data
        self.labels = ensure_platform_int(labels)  # _should_ already be np.intp
        self.ngroups = ngroups

        self._slabels = sorted_ids
        self._sort_idx = sort_idx

        self.axis = axis
        assert isinstance(axis, int), axis

    def __iter__(self) -> Iterator:
        sdata = self._sorted_data

        if self.ngroups == 0:
            # we are inside a generator, rather than raise StopIteration
            # we merely return signal the end
            return

        # Contiguous (start, end) positions of each group in sorted order.
        starts, ends = lib.generate_slices(self._slabels, self.ngroups)

        for start, end in zip(starts, ends):
            yield self._chop(sdata, slice(start, end))

    @cache_readonly
    def _sorted_data(self) -> NDFrameT:
        # Reorder once so each group occupies a contiguous block.
        return self.data.take(self._sort_idx, axis=self.axis)

    def _chop(self, sdata, slice_obj: slice) -> NDFrame:
        # Subclasses implement the type-specific fastpath slicing.
        raise AbstractMethodError(self)
1169
+
1170
class SeriesSplitter(DataSplitter):
    """DataSplitter yielding Series chunks, one per group."""

    def _chop(self, sdata: Series, slice_obj: slice) -> Series:
        """Fastpath equivalent to ``sdata.iloc[slice_obj]``."""
        sliced_mgr = sdata._mgr.get_slice(slice_obj)
        out = sdata._constructor_from_mgr(sliced_mgr, axes=sliced_mgr.axes)
        # Assign the name directly, skipping Series.name validation.
        out._name = sdata.name
        return out.__finalize__(sdata, method="groupby")
1178
+
1179
class FrameSplitter(DataSplitter):
    """DataSplitter yielding DataFrame chunks, one per group."""

    def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
        # Fastpath equivalent to:
        # if self.axis == 0:
        #     return sdata.iloc[slice_obj]
        # else:
        #     return sdata.iloc[:, slice_obj]
        # Note the manager's axes are in reversed order, hence 1 - self.axis.
        sliced_mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
        out = sdata._constructor_from_mgr(sliced_mgr, axes=sliced_mgr.axes)
        return out.__finalize__(sdata, method="groupby")
1190
+
1191
def _get_splitter(
    data: NDFrame,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    *,
    sort_idx: npt.NDArray[np.intp],
    sorted_ids: npt.NDArray[np.intp],
    axis: AxisInt = 0,
) -> DataSplitter:
    """Construct the DataSplitter subclass matching the type of ``data``."""
    # Series -> SeriesSplitter; anything else (i.e. DataFrame) -> FrameSplitter.
    splitter_cls: type[DataSplitter] = (
        SeriesSplitter if isinstance(data, Series) else FrameSplitter
    )
    return splitter_cls(
        data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis
    )
vlmpy310/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (691 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__init__.py ADDED
File without changes
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (173 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc ADDED
Binary file (14.9 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc ADDED
Binary file (21.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc ADDED
Binary file (32.6 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc ADDED
Binary file (5.18 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc ADDED
Binary file (4.09 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc ADDED
Binary file (28.9 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc ADDED
Binary file (16.4 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/accessors.py ADDED
@@ -0,0 +1,643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ datetimelike delegation
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ cast,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas.util._exceptions import find_stack_level
16
+
17
+ from pandas.core.dtypes.common import (
18
+ is_integer_dtype,
19
+ is_list_like,
20
+ )
21
+ from pandas.core.dtypes.dtypes import (
22
+ ArrowDtype,
23
+ CategoricalDtype,
24
+ DatetimeTZDtype,
25
+ PeriodDtype,
26
+ )
27
+ from pandas.core.dtypes.generic import ABCSeries
28
+
29
+ from pandas.core.accessor import (
30
+ PandasDelegate,
31
+ delegate_names,
32
+ )
33
+ from pandas.core.arrays import (
34
+ DatetimeArray,
35
+ PeriodArray,
36
+ TimedeltaArray,
37
+ )
38
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
39
+ from pandas.core.base import (
40
+ NoNewAttributesMixin,
41
+ PandasObject,
42
+ )
43
+ from pandas.core.indexes.datetimes import DatetimeIndex
44
+ from pandas.core.indexes.timedeltas import TimedeltaIndex
45
+
46
+ if TYPE_CHECKING:
47
+ from pandas import (
48
+ DataFrame,
49
+ Series,
50
+ )
51
+
52
+
53
class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
    """
    Base accessor delegating datetimelike properties/methods from a Series
    to an appropriate index/array built from its values.
    """

    _hidden_attrs = PandasObject._hidden_attrs | {
        "orig",
        "name",
    }

    def __init__(self, data: Series, orig) -> None:
        if not isinstance(data, ABCSeries):
            raise TypeError(
                f"cannot convert an object of type {type(data)} to a datetimelike index"
            )

        self._parent = data
        # orig is the original (categorical) Series, if the caller unwrapped
        # a CategoricalDtype; otherwise None.
        self.orig = orig
        self.name = getattr(data, "name", None)
        self._freeze()

    def _get_values(self):
        """Build the datetimelike index/array to delegate to, by dtype."""
        data = self._parent
        if lib.is_np_dtype(data.dtype, "M"):
            return DatetimeIndex(data, copy=False, name=self.name)

        elif isinstance(data.dtype, DatetimeTZDtype):
            return DatetimeIndex(data, copy=False, name=self.name)

        elif lib.is_np_dtype(data.dtype, "m"):
            return TimedeltaIndex(data, copy=False, name=self.name)

        elif isinstance(data.dtype, PeriodDtype):
            return PeriodArray(data, copy=False)

        raise TypeError(
            f"cannot convert an object of type {type(data)} to a datetimelike index"
        )

    def _delegate_property_get(self, name: str):
        """Fetch attribute ``name`` from the delegate and re-box as a Series."""
        from pandas import Series

        values = self._get_values()

        result = getattr(values, name)

        # maybe need to upcast (ints)
        if isinstance(result, np.ndarray):
            if is_integer_dtype(result):
                result = result.astype("int64")
        elif not is_list_like(result):
            # Scalar results (e.g. freq) are returned as-is.
            return result

        result = np.asarray(result)

        if self.orig is not None:
            index = self.orig.index
        else:
            index = self._parent.index
        # return the result as a Series
        result = Series(result, index=index, name=self.name).__finalize__(self._parent)

        # setting this object will show a SettingWithCopyWarning/Error
        result._is_copy = (
            "modifications to a property of a datetimelike "
            "object are not supported and are discarded. "
            "Change values on the original."
        )

        return result

    def _delegate_property_set(self, name: str, value, *args, **kwargs):
        # Delegated properties are read-only by design.
        raise ValueError(
            "modifications to a property of a datetimelike object are not supported. "
            "Change values on the original."
        )

    def _delegate_method(self, name: str, *args, **kwargs):
        """Call method ``name`` on the delegate and re-box as a Series."""
        from pandas import Series

        values = self._get_values()

        method = getattr(values, name)
        result = method(*args, **kwargs)

        if not is_list_like(result):
            return result

        result = Series(result, index=self._parent.index, name=self.name).__finalize__(
            self._parent
        )

        # setting this object will show a SettingWithCopyWarning/Error
        result._is_copy = (
            "modifications to a method of a datetimelike "
            "object are not supported and are discarded. "
            "Change values on the original."
        )

        return result
+
150
+
151
+ @delegate_names(
152
+ delegate=ArrowExtensionArray,
153
+ accessors=TimedeltaArray._datetimelike_ops,
154
+ typ="property",
155
+ accessor_mapping=lambda x: f"_dt_{x}",
156
+ raise_on_missing=False,
157
+ )
158
+ @delegate_names(
159
+ delegate=ArrowExtensionArray,
160
+ accessors=TimedeltaArray._datetimelike_methods,
161
+ typ="method",
162
+ accessor_mapping=lambda x: f"_dt_{x}",
163
+ raise_on_missing=False,
164
+ )
165
+ @delegate_names(
166
+ delegate=ArrowExtensionArray,
167
+ accessors=DatetimeArray._datetimelike_ops,
168
+ typ="property",
169
+ accessor_mapping=lambda x: f"_dt_{x}",
170
+ raise_on_missing=False,
171
+ )
172
+ @delegate_names(
173
+ delegate=ArrowExtensionArray,
174
+ accessors=DatetimeArray._datetimelike_methods,
175
+ typ="method",
176
+ accessor_mapping=lambda x: f"_dt_{x}",
177
+ raise_on_missing=False,
178
+ )
179
+ class ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin):
180
+ def __init__(self, data: Series, orig) -> None:
181
+ if not isinstance(data, ABCSeries):
182
+ raise TypeError(
183
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
184
+ )
185
+
186
+ self._parent = data
187
+ self._orig = orig
188
+ self._freeze()
189
+
190
+ def _delegate_property_get(self, name: str):
191
+ if not hasattr(self._parent.array, f"_dt_{name}"):
192
+ raise NotImplementedError(
193
+ f"dt.{name} is not supported for {self._parent.dtype}"
194
+ )
195
+ result = getattr(self._parent.array, f"_dt_{name}")
196
+
197
+ if not is_list_like(result):
198
+ return result
199
+
200
+ if self._orig is not None:
201
+ index = self._orig.index
202
+ else:
203
+ index = self._parent.index
204
+ # return the result as a Series, which is by definition a copy
205
+ result = type(self._parent)(
206
+ result, index=index, name=self._parent.name
207
+ ).__finalize__(self._parent)
208
+
209
+ return result
210
+
211
+ def _delegate_method(self, name: str, *args, **kwargs):
212
+ if not hasattr(self._parent.array, f"_dt_{name}"):
213
+ raise NotImplementedError(
214
+ f"dt.{name} is not supported for {self._parent.dtype}"
215
+ )
216
+
217
+ result = getattr(self._parent.array, f"_dt_{name}")(*args, **kwargs)
218
+
219
+ if self._orig is not None:
220
+ index = self._orig.index
221
+ else:
222
+ index = self._parent.index
223
+ # return the result as a Series, which is by definition a copy
224
+ result = type(self._parent)(
225
+ result, index=index, name=self._parent.name
226
+ ).__finalize__(self._parent)
227
+
228
+ return result
229
+
230
+ def to_pytimedelta(self):
231
+ return cast(ArrowExtensionArray, self._parent.array)._dt_to_pytimedelta()
232
+
233
+ def to_pydatetime(self):
234
+ # GH#20306
235
+ warnings.warn(
236
+ f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
237
+ "in a future version this will return a Series containing python "
238
+ "datetime objects instead of an ndarray. To retain the old behavior, "
239
+ "call `np.array` on the result",
240
+ FutureWarning,
241
+ stacklevel=find_stack_level(),
242
+ )
243
+ return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime()
244
+
245
+ def isocalendar(self) -> DataFrame:
246
+ from pandas import DataFrame
247
+
248
+ result = (
249
+ cast(ArrowExtensionArray, self._parent.array)
250
+ ._dt_isocalendar()
251
+ ._pa_array.combine_chunks()
252
+ )
253
+ iso_calendar_df = DataFrame(
254
+ {
255
+ col: type(self._parent.array)(result.field(i)) # type: ignore[call-arg]
256
+ for i, col in enumerate(["year", "week", "day"])
257
+ }
258
+ )
259
+ return iso_calendar_df
260
+
261
+ @property
262
+ def components(self) -> DataFrame:
263
+ from pandas import DataFrame
264
+
265
+ components_df = DataFrame(
266
+ {
267
+ col: getattr(self._parent.array, f"_dt_{col}")
268
+ for col in [
269
+ "days",
270
+ "hours",
271
+ "minutes",
272
+ "seconds",
273
+ "milliseconds",
274
+ "microseconds",
275
+ "nanoseconds",
276
+ ]
277
+ }
278
+ )
279
+ return components_df
280
+
281
+
282
+ @delegate_names(
283
+ delegate=DatetimeArray,
284
+ accessors=DatetimeArray._datetimelike_ops + ["unit"],
285
+ typ="property",
286
+ )
287
+ @delegate_names(
288
+ delegate=DatetimeArray,
289
+ accessors=DatetimeArray._datetimelike_methods + ["as_unit"],
290
+ typ="method",
291
+ )
292
+ class DatetimeProperties(Properties):
293
+ """
294
+ Accessor object for datetimelike properties of the Series values.
295
+
296
+ Examples
297
+ --------
298
+ >>> seconds_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="s"))
299
+ >>> seconds_series
300
+ 0 2000-01-01 00:00:00
301
+ 1 2000-01-01 00:00:01
302
+ 2 2000-01-01 00:00:02
303
+ dtype: datetime64[ns]
304
+ >>> seconds_series.dt.second
305
+ 0 0
306
+ 1 1
307
+ 2 2
308
+ dtype: int32
309
+
310
+ >>> hours_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="h"))
311
+ >>> hours_series
312
+ 0 2000-01-01 00:00:00
313
+ 1 2000-01-01 01:00:00
314
+ 2 2000-01-01 02:00:00
315
+ dtype: datetime64[ns]
316
+ >>> hours_series.dt.hour
317
+ 0 0
318
+ 1 1
319
+ 2 2
320
+ dtype: int32
321
+
322
+ >>> quarters_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="QE"))
323
+ >>> quarters_series
324
+ 0 2000-03-31
325
+ 1 2000-06-30
326
+ 2 2000-09-30
327
+ dtype: datetime64[ns]
328
+ >>> quarters_series.dt.quarter
329
+ 0 1
330
+ 1 2
331
+ 2 3
332
+ dtype: int32
333
+
334
+ Returns a Series indexed like the original Series.
335
+ Raises TypeError if the Series does not contain datetimelike values.
336
+ """
337
+
338
+ def to_pydatetime(self) -> np.ndarray:
339
+ """
340
+ Return the data as an array of :class:`datetime.datetime` objects.
341
+
342
+ .. deprecated:: 2.1.0
343
+
344
+ The current behavior of dt.to_pydatetime is deprecated.
345
+ In a future version this will return a Series containing python
346
+ datetime objects instead of a ndarray.
347
+
348
+ Timezone information is retained if present.
349
+
350
+ .. warning::
351
+
352
+ Python's datetime uses microsecond resolution, which is lower than
353
+ pandas (nanosecond). The values are truncated.
354
+
355
+ Returns
356
+ -------
357
+ numpy.ndarray
358
+ Object dtype array containing native Python datetime objects.
359
+
360
+ See Also
361
+ --------
362
+ datetime.datetime : Standard library value for a datetime.
363
+
364
+ Examples
365
+ --------
366
+ >>> s = pd.Series(pd.date_range('20180310', periods=2))
367
+ >>> s
368
+ 0 2018-03-10
369
+ 1 2018-03-11
370
+ dtype: datetime64[ns]
371
+
372
+ >>> s.dt.to_pydatetime()
373
+ array([datetime.datetime(2018, 3, 10, 0, 0),
374
+ datetime.datetime(2018, 3, 11, 0, 0)], dtype=object)
375
+
376
+ pandas' nanosecond precision is truncated to microseconds.
377
+
378
+ >>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))
379
+ >>> s
380
+ 0 2018-03-10 00:00:00.000000000
381
+ 1 2018-03-10 00:00:00.000000001
382
+ dtype: datetime64[ns]
383
+
384
+ >>> s.dt.to_pydatetime()
385
+ array([datetime.datetime(2018, 3, 10, 0, 0),
386
+ datetime.datetime(2018, 3, 10, 0, 0)], dtype=object)
387
+ """
388
+ # GH#20306
389
+ warnings.warn(
390
+ f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
391
+ "in a future version this will return a Series containing python "
392
+ "datetime objects instead of an ndarray. To retain the old behavior, "
393
+ "call `np.array` on the result",
394
+ FutureWarning,
395
+ stacklevel=find_stack_level(),
396
+ )
397
+ return self._get_values().to_pydatetime()
398
+
399
+ @property
400
+ def freq(self):
401
+ return self._get_values().inferred_freq
402
+
403
+ def isocalendar(self) -> DataFrame:
404
+ """
405
+ Calculate year, week, and day according to the ISO 8601 standard.
406
+
407
+ Returns
408
+ -------
409
+ DataFrame
410
+ With columns year, week and day.
411
+
412
+ See Also
413
+ --------
414
+ Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
415
+ week number, and weekday for the given Timestamp object.
416
+ datetime.date.isocalendar : Return a named tuple object with
417
+ three components: year, week and weekday.
418
+
419
+ Examples
420
+ --------
421
+ >>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
422
+ >>> ser.dt.isocalendar()
423
+ year week day
424
+ 0 2009 53 5
425
+ 1 <NA> <NA> <NA>
426
+ >>> ser.dt.isocalendar().week
427
+ 0 53
428
+ 1 <NA>
429
+ Name: week, dtype: UInt32
430
+ """
431
+ return self._get_values().isocalendar().set_index(self._parent.index)
432
+
433
+
434
+ @delegate_names(
435
+ delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
436
+ )
437
+ @delegate_names(
438
+ delegate=TimedeltaArray,
439
+ accessors=TimedeltaArray._datetimelike_methods,
440
+ typ="method",
441
+ )
442
+ class TimedeltaProperties(Properties):
443
+ """
444
+ Accessor object for datetimelike properties of the Series values.
445
+
446
+ Returns a Series indexed like the original Series.
447
+ Raises TypeError if the Series does not contain datetimelike values.
448
+
449
+ Examples
450
+ --------
451
+ >>> seconds_series = pd.Series(
452
+ ... pd.timedelta_range(start="1 second", periods=3, freq="s")
453
+ ... )
454
+ >>> seconds_series
455
+ 0 0 days 00:00:01
456
+ 1 0 days 00:00:02
457
+ 2 0 days 00:00:03
458
+ dtype: timedelta64[ns]
459
+ >>> seconds_series.dt.seconds
460
+ 0 1
461
+ 1 2
462
+ 2 3
463
+ dtype: int32
464
+ """
465
+
466
+ def to_pytimedelta(self) -> np.ndarray:
467
+ """
468
+ Return an array of native :class:`datetime.timedelta` objects.
469
+
470
+ Python's standard `datetime` library uses a different representation
471
+ timedelta's. This method converts a Series of pandas Timedeltas
472
+ to `datetime.timedelta` format with the same length as the original
473
+ Series.
474
+
475
+ Returns
476
+ -------
477
+ numpy.ndarray
478
+ Array of 1D containing data with `datetime.timedelta` type.
479
+
480
+ See Also
481
+ --------
482
+ datetime.timedelta : A duration expressing the difference
483
+ between two date, time, or datetime.
484
+
485
+ Examples
486
+ --------
487
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
488
+ >>> s
489
+ 0 0 days
490
+ 1 1 days
491
+ 2 2 days
492
+ 3 3 days
493
+ 4 4 days
494
+ dtype: timedelta64[ns]
495
+
496
+ >>> s.dt.to_pytimedelta()
497
+ array([datetime.timedelta(0), datetime.timedelta(days=1),
498
+ datetime.timedelta(days=2), datetime.timedelta(days=3),
499
+ datetime.timedelta(days=4)], dtype=object)
500
+ """
501
+ return self._get_values().to_pytimedelta()
502
+
503
+ @property
504
+ def components(self):
505
+ """
506
+ Return a Dataframe of the components of the Timedeltas.
507
+
508
+ Returns
509
+ -------
510
+ DataFrame
511
+
512
+ Examples
513
+ --------
514
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))
515
+ >>> s
516
+ 0 0 days 00:00:00
517
+ 1 0 days 00:00:01
518
+ 2 0 days 00:00:02
519
+ 3 0 days 00:00:03
520
+ 4 0 days 00:00:04
521
+ dtype: timedelta64[ns]
522
+ >>> s.dt.components
523
+ days hours minutes seconds milliseconds microseconds nanoseconds
524
+ 0 0 0 0 0 0 0 0
525
+ 1 0 0 0 1 0 0 0
526
+ 2 0 0 0 2 0 0 0
527
+ 3 0 0 0 3 0 0 0
528
+ 4 0 0 0 4 0 0 0
529
+ """
530
+ return (
531
+ self._get_values()
532
+ .components.set_index(self._parent.index)
533
+ .__finalize__(self._parent)
534
+ )
535
+
536
+ @property
537
+ def freq(self):
538
+ return self._get_values().inferred_freq
539
+
540
+
541
+ @delegate_names(
542
+ delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ="property"
543
+ )
544
+ @delegate_names(
545
+ delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ="method"
546
+ )
547
+ class PeriodProperties(Properties):
548
+ """
549
+ Accessor object for datetimelike properties of the Series values.
550
+
551
+ Returns a Series indexed like the original Series.
552
+ Raises TypeError if the Series does not contain datetimelike values.
553
+
554
+ Examples
555
+ --------
556
+ >>> seconds_series = pd.Series(
557
+ ... pd.period_range(
558
+ ... start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s"
559
+ ... )
560
+ ... )
561
+ >>> seconds_series
562
+ 0 2000-01-01 00:00:00
563
+ 1 2000-01-01 00:00:01
564
+ 2 2000-01-01 00:00:02
565
+ 3 2000-01-01 00:00:03
566
+ dtype: period[s]
567
+ >>> seconds_series.dt.second
568
+ 0 0
569
+ 1 1
570
+ 2 2
571
+ 3 3
572
+ dtype: int64
573
+
574
+ >>> hours_series = pd.Series(
575
+ ... pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h")
576
+ ... )
577
+ >>> hours_series
578
+ 0 2000-01-01 00:00
579
+ 1 2000-01-01 01:00
580
+ 2 2000-01-01 02:00
581
+ 3 2000-01-01 03:00
582
+ dtype: period[h]
583
+ >>> hours_series.dt.hour
584
+ 0 0
585
+ 1 1
586
+ 2 2
587
+ 3 3
588
+ dtype: int64
589
+
590
+ >>> quarters_series = pd.Series(
591
+ ... pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC")
592
+ ... )
593
+ >>> quarters_series
594
+ 0 2000Q1
595
+ 1 2000Q2
596
+ 2 2000Q3
597
+ 3 2000Q4
598
+ dtype: period[Q-DEC]
599
+ >>> quarters_series.dt.quarter
600
+ 0 1
601
+ 1 2
602
+ 2 3
603
+ 3 4
604
+ dtype: int64
605
+ """
606
+
607
+
608
+ class CombinedDatetimelikeProperties(
609
+ DatetimeProperties, TimedeltaProperties, PeriodProperties
610
+ ):
611
+ def __new__(cls, data: Series): # pyright: ignore[reportInconsistentConstructor]
612
+ # CombinedDatetimelikeProperties isn't really instantiated. Instead
613
+ # we need to choose which parent (datetime or timedelta) is
614
+ # appropriate. Since we're checking the dtypes anyway, we'll just
615
+ # do all the validation here.
616
+
617
+ if not isinstance(data, ABCSeries):
618
+ raise TypeError(
619
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
620
+ )
621
+
622
+ orig = data if isinstance(data.dtype, CategoricalDtype) else None
623
+ if orig is not None:
624
+ data = data._constructor(
625
+ orig.array,
626
+ name=orig.name,
627
+ copy=False,
628
+ dtype=orig._values.categories.dtype,
629
+ index=orig.index,
630
+ )
631
+
632
+ if isinstance(data.dtype, ArrowDtype) and data.dtype.kind in "Mm":
633
+ return ArrowTemporalProperties(data, orig)
634
+ if lib.is_np_dtype(data.dtype, "M"):
635
+ return DatetimeProperties(data, orig)
636
+ elif isinstance(data.dtype, DatetimeTZDtype):
637
+ return DatetimeProperties(data, orig)
638
+ elif lib.is_np_dtype(data.dtype, "m"):
639
+ return TimedeltaProperties(data, orig)
640
+ elif isinstance(data.dtype, PeriodDtype):
641
+ return PeriodProperties(data, orig)
642
+
643
+ raise AttributeError("Can only use .dt accessor with datetimelike values")
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/api.py ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import textwrap
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ cast,
7
+ )
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs import (
12
+ NaT,
13
+ lib,
14
+ )
15
+ from pandas.errors import InvalidIndexError
16
+
17
+ from pandas.core.dtypes.cast import find_common_type
18
+
19
+ from pandas.core.algorithms import safe_sort
20
+ from pandas.core.indexes.base import (
21
+ Index,
22
+ _new_Index,
23
+ ensure_index,
24
+ ensure_index_from_sequences,
25
+ get_unanimous_names,
26
+ )
27
+ from pandas.core.indexes.category import CategoricalIndex
28
+ from pandas.core.indexes.datetimes import DatetimeIndex
29
+ from pandas.core.indexes.interval import IntervalIndex
30
+ from pandas.core.indexes.multi import MultiIndex
31
+ from pandas.core.indexes.period import PeriodIndex
32
+ from pandas.core.indexes.range import RangeIndex
33
+ from pandas.core.indexes.timedeltas import TimedeltaIndex
34
+
35
+ if TYPE_CHECKING:
36
+ from pandas._typing import Axis
37
# Warning text emitted when a concatenation-like operation implicitly sorts
# the non-concatenation axis.
_sort_msg = textwrap.dedent(
    """\
    Sorting because non-concatenation axis is not aligned. A future version
    of pandas will change to not sort by default.

    To accept the future behavior, pass 'sort=False'.

    To retain the current behavior and silence the warning, pass 'sort=True'.
    """
)


# Names re-exported for use by other pandas-internal modules.
__all__ = [
    "Index",
    "MultiIndex",
    "CategoricalIndex",
    "IntervalIndex",
    "RangeIndex",
    "InvalidIndexError",
    "TimedeltaIndex",
    "PeriodIndex",
    "DatetimeIndex",
    "_new_Index",
    "NaT",
    "ensure_index",
    "ensure_index_from_sequences",
    "get_objs_combined_axis",
    "union_indexes",
    "get_unanimous_names",
    "all_indexes_same",
    "default_index",
    "safe_sort_index",
]
70
+
71
+
72
def get_objs_combined_axis(
    objs,
    intersect: bool = False,
    axis: Axis = 0,
    sort: bool = True,
    copy: bool = False,
) -> Index:
    """
    Combine the indexes found on ``axis`` of each object in ``objs``.

    Extracts the index along the requested axis from every object and
    delegates to ``_get_combined_index`` to take either their union
    (default) or their intersection.

    Parameters
    ----------
    objs : list
        Series or DataFrame objects, may be mix of the two.
    intersect : bool, default False
        If True, calculate the intersection between indexes. Otherwise,
        calculate the union.
    axis : {0 or 'index', 1 or 'outer'}, default 0
        The axis to extract indexes from.
    sort : bool, default True
        Whether the result index should come out sorted or not.
    copy : bool, default False
        If True, return a copy of the combined index.

    Returns
    -------
    Index
    """
    extracted = [obj._get_axis(axis) for obj in objs]
    return _get_combined_index(extracted, intersect=intersect, sort=sort, copy=copy)
104
+
105
+
106
+ def _get_distinct_objs(objs: list[Index]) -> list[Index]:
107
+ """
108
+ Return a list with distinct elements of "objs" (different ids).
109
+ Preserves order.
110
+ """
111
+ ids: set[int] = set()
112
+ res = []
113
+ for obj in objs:
114
+ if id(obj) not in ids:
115
+ ids.add(id(obj))
116
+ res.append(obj)
117
+ return res
118
+
119
+
120
+ def _get_combined_index(
121
+ indexes: list[Index],
122
+ intersect: bool = False,
123
+ sort: bool = False,
124
+ copy: bool = False,
125
+ ) -> Index:
126
+ """
127
+ Return the union or intersection of indexes.
128
+
129
+ Parameters
130
+ ----------
131
+ indexes : list of Index or list objects
132
+ When intersect=True, do not accept list of lists.
133
+ intersect : bool, default False
134
+ If True, calculate the intersection between indexes. Otherwise,
135
+ calculate the union.
136
+ sort : bool, default False
137
+ Whether the result index should come out sorted or not.
138
+ copy : bool, default False
139
+ If True, return a copy of the combined index.
140
+
141
+ Returns
142
+ -------
143
+ Index
144
+ """
145
+ # TODO: handle index names!
146
+ indexes = _get_distinct_objs(indexes)
147
+ if len(indexes) == 0:
148
+ index = Index([])
149
+ elif len(indexes) == 1:
150
+ index = indexes[0]
151
+ elif intersect:
152
+ index = indexes[0]
153
+ for other in indexes[1:]:
154
+ index = index.intersection(other)
155
+ else:
156
+ index = union_indexes(indexes, sort=False)
157
+ index = ensure_index(index)
158
+
159
+ if sort:
160
+ index = safe_sort_index(index)
161
+ # GH 29879
162
+ if copy:
163
+ index = index.copy()
164
+
165
+ return index
166
+
167
+
168
def safe_sort_index(index: Index) -> Index:
    """
    Returns the sorted index

    We keep the dtypes and the name attributes.

    Parameters
    ----------
    index : an Index

    Returns
    -------
    Index
    """
    if index.is_monotonic_increasing:
        # Already in order; return the very same object.
        return index

    try:
        ordered = safe_sort(index)
    except TypeError:
        # Non-comparable mixed values: hand back the index unchanged.
        return index

    if isinstance(ordered, Index):
        return ordered

    ordered = cast(np.ndarray, ordered)
    if isinstance(index, MultiIndex):
        return MultiIndex.from_tuples(ordered, names=index.names)
    # Rebuild with the original dtype and name preserved.
    return Index(ordered, name=index.name, dtype=index.dtype)
200
+
201
+
202
def union_indexes(indexes, sort: bool | None = True) -> Index:
    """
    Return the union of indexes.

    The behavior of sort and names is not consistent.

    Parameters
    ----------
    indexes : list of Index or list objects
    sort : bool, default True
        Whether the result index should come out sorted or not.

    Returns
    -------
    Index
    """
    if len(indexes) == 0:
        raise AssertionError("Must have at least 1 Index to union")
    if len(indexes) == 1:
        # Single input: only a bare list needs wrapping (and optional sort).
        result = indexes[0]
        if isinstance(result, list):
            if not sort:
                result = Index(result)
            else:
                result = Index(sorted(result))
        return result

    indexes, kind = _sanitize_and_check(indexes)

    def _unique_indices(inds, dtype) -> Index:
        """
        Concatenate indices and remove duplicates.

        Parameters
        ----------
        inds : list of Index or list objects
        dtype : dtype to set for the resulting Index

        Returns
        -------
        Index
        """
        if all(isinstance(ind, Index) for ind in inds):
            # All Index: cast to the common dtype, then append only the
            # values missing from the first index's uniques.
            inds = [ind.astype(dtype, copy=False) for ind in inds]
            result = inds[0].unique()
            other = inds[1].append(inds[2:])
            diff = other[result.get_indexer_for(other) == -1]
            if len(diff):
                result = result.append(diff.unique())
            if sort:
                result = result.sort_values()
            return result

        def conv(i):
            if isinstance(i, Index):
                i = i.tolist()
            return i

        return Index(
            lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort),
            dtype=dtype,
        )

    def _find_common_index_dtype(inds):
        """
        Finds a common type for the indexes to pass through to resulting index.

        Parameters
        ----------
        inds: list of Index or list objects

        Returns
        -------
        The common type or None if no indexes were given
        """
        # NOTE(review): this iterates the enclosing `indexes`, not the `inds`
        # parameter — the argument is effectively unused. Presumably both
        # call sites pass `indexes` anyway; confirm before changing.
        dtypes = [idx.dtype for idx in indexes if isinstance(idx, Index)]
        if dtypes:
            dtype = find_common_type(dtypes)
        else:
            dtype = None

        return dtype

    if kind == "special":
        result = indexes[0]

        dtis = [x for x in indexes if isinstance(x, DatetimeIndex)]
        dti_tzs = [x for x in dtis if x.tz is not None]
        if len(dti_tzs) not in [0, len(dtis)]:
            # TODO: this behavior is not tested (so may not be desired),
            #  but is kept in order to keep behavior the same when
            #  deprecating union_many
            # test_frame_from_dict_with_mixed_indexes
            raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")

        if len(dtis) == len(indexes):
            # All DatetimeIndex: force a sorted union.
            sort = True
            result = indexes[0]

        elif len(dtis) > 1:
            # If we have mixed timezones, our casting behavior may depend on
            # the order of indexes, which we don't want.
            sort = False

            # TODO: what about Categorical[dt64]?
            # test_frame_from_dict_with_mixed_indexes
            indexes = [x.astype(object, copy=False) for x in indexes]
            result = indexes[0]

        for other in indexes[1:]:
            result = result.union(other, sort=None if sort else False)
        return result

    elif kind == "array":
        dtype = _find_common_index_dtype(indexes)
        index = indexes[0]
        if not all(index.equals(other) for other in indexes[1:]):
            index = _unique_indices(indexes, dtype)

        # Keep a name only if every input agrees on it.
        name = get_unanimous_names(*indexes)[0]
        if name != index.name:
            index = index.rename(name)
        return index
    else:  # kind='list'
        dtype = _find_common_index_dtype(indexes)
        return _unique_indices(indexes, dtype)
328
+
329
+
330
+ def _sanitize_and_check(indexes):
331
+ """
332
+ Verify the type of indexes and convert lists to Index.
333
+
334
+ Cases:
335
+
336
+ - [list, list, ...]: Return ([list, list, ...], 'list')
337
+ - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
338
+ Lists are sorted and converted to Index.
339
+ - [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
340
+ TYPE = 'special' if at least one special type, 'array' otherwise.
341
+
342
+ Parameters
343
+ ----------
344
+ indexes : list of Index or list objects
345
+
346
+ Returns
347
+ -------
348
+ sanitized_indexes : list of Index or list objects
349
+ type : {'list', 'array', 'special'}
350
+ """
351
+ kinds = list({type(index) for index in indexes})
352
+
353
+ if list in kinds:
354
+ if len(kinds) > 1:
355
+ indexes = [
356
+ Index(list(x)) if not isinstance(x, Index) else x for x in indexes
357
+ ]
358
+ kinds.remove(list)
359
+ else:
360
+ return indexes, "list"
361
+
362
+ if len(kinds) > 1 or Index not in kinds:
363
+ return indexes, "special"
364
+ else:
365
+ return indexes, "array"
366
+
367
+
368
def all_indexes_same(indexes) -> bool:
    """
    Determine if all indexes contain the same elements.

    Parameters
    ----------
    indexes : iterable of Index objects

    Returns
    -------
    bool
        True if all indexes contain the same elements, False otherwise.
    """
    iterator = iter(indexes)
    # Compare everything against the first index; bail out on the first
    # mismatch. (Like the `all(...)` form, an empty iterable raises.)
    reference = next(iterator)
    for candidate in iterator:
        if not reference.equals(candidate):
            return False
    return True
384
+
385
+
386
def default_index(n: int) -> RangeIndex:
    # Fast constructor for the default RangeIndex(0..n-1) used when no
    # explicit index is supplied; _simple_new skips argument validation.
    rng = range(n)
    return RangeIndex._simple_new(rng, name=None)
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/base.py ADDED
The diff for this file is too large to render. See raw diff
 
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/category.py ADDED
@@ -0,0 +1,513 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ Literal,
7
+ cast,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs import index as libindex
13
+ from pandas.util._decorators import (
14
+ cache_readonly,
15
+ doc,
16
+ )
17
+
18
+ from pandas.core.dtypes.common import is_scalar
19
+ from pandas.core.dtypes.concat import concat_compat
20
+ from pandas.core.dtypes.dtypes import CategoricalDtype
21
+ from pandas.core.dtypes.missing import (
22
+ is_valid_na_for_dtype,
23
+ isna,
24
+ )
25
+
26
+ from pandas.core.arrays.categorical import (
27
+ Categorical,
28
+ contains,
29
+ )
30
+ from pandas.core.construction import extract_array
31
+ from pandas.core.indexes.base import (
32
+ Index,
33
+ maybe_extract_name,
34
+ )
35
+ from pandas.core.indexes.extension import (
36
+ NDArrayBackedExtensionIndex,
37
+ inherit_names,
38
+ )
39
+
40
+ if TYPE_CHECKING:
41
+ from collections.abc import Hashable
42
+
43
+ from pandas._typing import (
44
+ Dtype,
45
+ DtypeObj,
46
+ Self,
47
+ npt,
48
+ )
49
+
50
+
51
@inherit_names(
    [
        "argsort",
        "tolist",
        "codes",
        "categories",
        "ordered",
        "_reverse_indexer",
        "searchsorted",
        "min",
        "max",
    ],
    Categorical,
)
@inherit_names(
    [
        "rename_categories",
        "reorder_categories",
        "add_categories",
        "remove_categories",
        "remove_unused_categories",
        "set_categories",
        "as_ordered",
        "as_unordered",
    ],
    Categorical,
    wrap=True,
)
class CategoricalIndex(NDArrayBackedExtensionIndex):
    """
    Index based on an underlying :class:`Categorical`.

    CategoricalIndex, like Categorical, can only take on a limited,
    and usually fixed, number of possible values (`categories`). Also,
    like Categorical, it might have an order, but numerical operations
    (additions, divisions, ...) are not possible.

    Parameters
    ----------
    data : array-like (1-dimensional)
        The values of the categorical. If `categories` are given, values not in
        `categories` will be replaced with NaN.
    categories : index-like, optional
        The categories for the categorical. Items need to be unique.
        If the categories are not given here (and also not in `dtype`), they
        will be inferred from the `data`.
    ordered : bool, optional
        Whether or not this categorical is treated as an ordered
        categorical. If not given here or in `dtype`, the resulting
        categorical will be unordered.
    dtype : CategoricalDtype or "category", optional
        If :class:`CategoricalDtype`, cannot be used together with
        `categories` or `ordered`.
    copy : bool, default False
        Make a copy of input ndarray.
    name : object, optional
        Name to be stored in the index.

    Attributes
    ----------
    codes
    categories
    ordered

    Methods
    -------
    rename_categories
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    as_ordered
    as_unordered
    map

    Raises
    ------
    ValueError
        If the categories do not validate.
    TypeError
        If an explicit ``ordered=True`` is given but no `categories` and the
        `values` are not sortable.

    See Also
    --------
    Index : The base pandas Index type.
    Categorical : A categorical array.
    CategoricalDtype : Type for categorical data.

    Notes
    -----
    See the `user guide
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__
    for more.

    Examples
    --------
    >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    ``CategoricalIndex`` can also be instantiated from a ``Categorical``:

    >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
    >>> pd.CategoricalIndex(c)
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    Ordered ``CategoricalIndex`` can have a min and max value.

    >>> ci = pd.CategoricalIndex(
    ...     ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
    ... )
    >>> ci
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['c', 'b', 'a'], ordered=True, dtype='category')
    >>> ci.min()
    'c'
    """

    _typ = "categoricalindex"
    _data_cls = Categorical

    @property
    def _can_hold_strings(self):
        # Delegated: string-holding depends on the categories' dtype.
        return self.categories._can_hold_strings

    @cache_readonly
    def _should_fallback_to_positional(self) -> bool:
        return self.categories._should_fallback_to_positional

    # Type declarations for attributes supplied by @inherit_names / backing data.
    codes: np.ndarray
    categories: Index
    ordered: bool | None
    _data: Categorical
    _values: Categorical

    @property
    def _engine_type(self) -> type[libindex.IndexEngine]:
        # self.codes can have dtype int8, int16, int32 or int64, so we need
        # to return the corresponding engine type (libindex.Int8Engine, etc.).
        return {
            np.int8: libindex.Int8Engine,
            np.int16: libindex.Int16Engine,
            np.int32: libindex.Int32Engine,
            np.int64: libindex.Int64Engine,
        }[self.codes.dtype.type]

    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        data=None,
        categories=None,
        ordered=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable | None = None,
    ) -> Self:
        name = maybe_extract_name(name, data, cls)

        if is_scalar(data):
            # GH#38944 include None here, which pre-2.0 subbed in []
            cls._raise_scalar_data_error(data)

        data = Categorical(
            data, categories=categories, ordered=ordered, dtype=dtype, copy=copy
        )

        return cls._simple_new(data, name=name)

    # --------------------------------------------------------------------

    def _is_dtype_compat(self, other: Index) -> Categorical:
        """
        *this is an internal non-public method*

        provide a comparison between the dtype of self and other (coercing if
        needed)

        Parameters
        ----------
        other : Index

        Returns
        -------
        Categorical

        Raises
        ------
        TypeError if the dtypes are not compatible
        """
        if isinstance(other.dtype, CategoricalDtype):
            cat = extract_array(other)
            cat = cast(Categorical, cat)
            if not cat._categories_match_up_to_permutation(self._values):
                raise TypeError(
                    "categories must match existing categories when appending"
                )

        elif other._is_multi:
            # preempt raising NotImplementedError in isna call
            raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")
        else:
            values = other

            # Coerce to our dtype; values absent from our categories become NA,
            # which the isin check below then rejects.
            cat = Categorical(other, dtype=self.dtype)
            other = CategoricalIndex(cat)
            if not other.isin(values).all():
                raise TypeError(
                    "cannot append a non-category item to a CategoricalIndex"
                )
            cat = other._values

            if not ((cat == values) | (isna(cat) & isna(values))).all():
                # GH#37667 see test_equals_non_category
                raise TypeError(
                    "categories must match existing categories when appending"
                )

        return cat

    def equals(self, other: object) -> bool:
        """
        Determine if two CategoricalIndex objects contain the same elements.

        Returns
        -------
        bool
            ``True`` if two :class:`pandas.CategoricalIndex` objects have equal
            elements, ``False`` otherwise.

        Examples
        --------
        >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
        >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']))
        >>> ci.equals(ci2)
        True

        The order of elements matters.

        >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c'])
        >>> ci.equals(ci3)
        False

        The orderedness also matters.

        >>> ci4 = ci.as_ordered()
        >>> ci.equals(ci4)
        False

        The categories matter, but the order of the categories matters only when
        ``ordered=True``.

        >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd'])
        >>> ci.equals(ci5)
        False

        >>> ci6 = ci.set_categories(['b', 'c', 'a'])
        >>> ci.equals(ci6)
        True
        >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
        ...                                  ordered=True)
        >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a'])
        >>> ci_ordered.equals(ci2_ordered)
        False
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False

        try:
            other = self._is_dtype_compat(other)
        except (TypeError, ValueError):
            # Incompatible dtype means not equal.
            return False

        return self._data.equals(other)

    # --------------------------------------------------------------------
    # Rendering Methods

    @property
    def _formatter_func(self):
        return self.categories._formatter_func

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value)
        """
        attrs: list[tuple[str, str | int | bool | None]]

        attrs = [
            (
                "categories",
                f"[{', '.join(self._data._repr_categories())}]",
            ),
            ("ordered", self.ordered),
        ]
        extra = super()._format_attrs()
        return attrs + extra

    # --------------------------------------------------------------------

    @property
    def inferred_type(self) -> str:
        return "categorical"

    @doc(Index.__contains__)
    def __contains__(self, key: Any) -> bool:
        # if key is a NaN, check if any NaN is in self.
        if is_valid_na_for_dtype(key, self.categories.dtype):
            return self.hasnans

        return contains(self, key, container=self._engine)

    def reindex(
        self, target, method=None, level=None, limit: int | None = None, tolerance=None
    ) -> tuple[Index, npt.NDArray[np.intp] | None]:
        """
        Create index with target's values (move/add/delete values as necessary)

        Returns
        -------
        new_index : pd.Index
            Resulting index
        indexer : np.ndarray[np.intp] or None
            Indices of output values in original index

        """
        # method/level/limit are unsupported for categorical reindexing.
        if method is not None:
            raise NotImplementedError(
                "argument method is not implemented for CategoricalIndex.reindex"
            )
        if level is not None:
            raise NotImplementedError(
                "argument level is not implemented for CategoricalIndex.reindex"
            )
        if limit is not None:
            raise NotImplementedError(
                "argument limit is not implemented for CategoricalIndex.reindex"
            )
        return super().reindex(target)

    # --------------------------------------------------------------------
    # Indexing Methods

    def _maybe_cast_indexer(self, key) -> int:
        # GH#41933: we have to do this instead of self._data._validate_scalar
        # because this will correctly get partial-indexing on Interval categories
        try:
            return self._data._unbox_scalar(key)
        except KeyError:
            if is_valid_na_for_dtype(key, self.categories.dtype):
                # NA maps to code -1.
                return -1
            raise

    def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:
        if isinstance(values, CategoricalIndex):
            values = values._data
        if isinstance(values, Categorical):
            # Indexing on codes is more efficient if categories are the same,
            # so we can apply some optimizations based on the degree of
            # dtype-matching.
            cat = self._data._encode_with_my_categories(values)
            codes = cat._codes
        else:
            codes = self.categories.get_indexer(values)
            codes = codes.astype(self.codes.dtype, copy=False)
            cat = self._data._from_backing_data(codes)
        return type(self)._simple_new(cat)

    # --------------------------------------------------------------------

    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        return self.categories._is_comparable_dtype(dtype)

    def map(self, mapper, na_action: Literal["ignore"] | None = None):
        """
        Map values using input an input mapping or function.

        Maps the values (their categories, not the codes) of the index to new
        categories. If the mapping correspondence is one-to-one the result is a
        :class:`~pandas.CategoricalIndex` which has the same order property as
        the original, otherwise an :class:`~pandas.Index` is returned.

        If a `dict` or :class:`~pandas.Series` is used any unmapped category is
        mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
        will be returned.

        Parameters
        ----------
        mapper : function, dict, or Series
            Mapping correspondence.

        Returns
        -------
        pandas.CategoricalIndex or pandas.Index
            Mapped index.

        See Also
        --------
        Index.map : Apply a mapping correspondence on an
            :class:`~pandas.Index`.
        Series.map : Apply a mapping correspondence on a
            :class:`~pandas.Series`.
        Series.apply : Apply more complex functions on a
            :class:`~pandas.Series`.

        Examples
        --------
        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
        >>> idx
        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
                          ordered=False, dtype='category')
        >>> idx.map(lambda x: x.upper())
        CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
                         ordered=False, dtype='category')
        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
        CategoricalIndex(['first', 'second', 'third'], categories=['first',
                         'second', 'third'], ordered=False, dtype='category')

        If the mapping is one-to-one the ordering of the categories is
        preserved:

        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
        >>> idx
        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
                         ordered=True, dtype='category')
        >>> idx.map({'a': 3, 'b': 2, 'c': 1})
        CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
                         dtype='category')

        If the mapping is not one-to-one an :class:`~pandas.Index` is returned:

        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
        Index(['first', 'second', 'first'], dtype='object')

        If a `dict` is used, all unmapped categories are mapped to `NaN` and
        the result is an :class:`~pandas.Index`:

        >>> idx.map({'a': 'first', 'b': 'second'})
        Index(['first', 'second', nan], dtype='object')
        """
        mapped = self._values.map(mapper, na_action=na_action)
        return Index(mapped, name=self.name)

    def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
        # if calling index is category, don't check dtype of others
        try:
            cat = Categorical._concat_same_type(
                [self._is_dtype_compat(c) for c in to_concat]
            )
        except TypeError:
            # not all to_concat elements are among our categories (or NA)

            res = concat_compat([x._values for x in to_concat])
            return Index(res, name=name)
        else:
            return type(self)._simple_new(cat, name=name)
@@ -0,0 +1,843 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Base and utility classes for tseries type pandas objects.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from abc import (
7
+ ABC,
8
+ abstractmethod,
9
+ )
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Any,
13
+ Callable,
14
+ cast,
15
+ final,
16
+ )
17
+ import warnings
18
+
19
+ import numpy as np
20
+
21
+ from pandas._config import using_copy_on_write
22
+
23
+ from pandas._libs import (
24
+ NaT,
25
+ Timedelta,
26
+ lib,
27
+ )
28
+ from pandas._libs.tslibs import (
29
+ BaseOffset,
30
+ Resolution,
31
+ Tick,
32
+ parsing,
33
+ to_offset,
34
+ )
35
+ from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
36
+ from pandas.compat.numpy import function as nv
37
+ from pandas.errors import (
38
+ InvalidIndexError,
39
+ NullFrequencyError,
40
+ )
41
+ from pandas.util._decorators import (
42
+ Appender,
43
+ cache_readonly,
44
+ doc,
45
+ )
46
+ from pandas.util._exceptions import find_stack_level
47
+
48
+ from pandas.core.dtypes.common import (
49
+ is_integer,
50
+ is_list_like,
51
+ )
52
+ from pandas.core.dtypes.concat import concat_compat
53
+ from pandas.core.dtypes.dtypes import CategoricalDtype
54
+
55
+ from pandas.core.arrays import (
56
+ DatetimeArray,
57
+ ExtensionArray,
58
+ PeriodArray,
59
+ TimedeltaArray,
60
+ )
61
+ from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
62
+ import pandas.core.common as com
63
+ import pandas.core.indexes.base as ibase
64
+ from pandas.core.indexes.base import (
65
+ Index,
66
+ _index_shared_docs,
67
+ )
68
+ from pandas.core.indexes.extension import NDArrayBackedExtensionIndex
69
+ from pandas.core.indexes.range import RangeIndex
70
+ from pandas.core.tools.timedeltas import to_timedelta
71
+
72
+ if TYPE_CHECKING:
73
+ from collections.abc import Sequence
74
+ from datetime import datetime
75
+
76
+ from pandas._typing import (
77
+ Axis,
78
+ Self,
79
+ npt,
80
+ )
81
+
82
+ from pandas import CategoricalIndex
83
+
84
+ _index_doc_kwargs = dict(ibase._index_doc_kwargs)
85
+
86
+
87
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC):
    """
    Common ops mixin to support a unified interface datetimelike Index.

    Shared by DatetimeIndex, TimedeltaIndex and PeriodIndex; the backing
    ``_data`` is always the corresponding ExtensionArray, to which most
    operations here delegate.
    """

    # Datetimelike indexes do not accept raw strings as elements.
    _can_hold_strings = False
    _data: DatetimeArray | TimedeltaArray | PeriodArray

    @doc(DatetimeLikeArrayMixin.mean)
    def mean(self, *, skipna: bool = True, axis: int | None = 0):
        # Delegate to the backing array (NaT handling controlled by skipna).
        return self._data.mean(skipna=skipna, axis=axis)

    @property
    def freq(self) -> BaseOffset | None:
        # Frequency metadata lives on the backing array, not the Index.
        return self._data.freq

    @freq.setter
    def freq(self, value) -> None:
        # error: Property "freq" defined in "PeriodArray" is read-only [misc]
        self._data.freq = value  # type: ignore[misc]

    @property
    def asi8(self) -> npt.NDArray[np.int64]:
        # int64 (epoch/ordinal) view of the underlying values.
        return self._data.asi8

    @property
    @doc(DatetimeLikeArrayMixin.freqstr)
    def freqstr(self) -> str:
        from pandas import PeriodIndex

        if self._data.freqstr is not None and isinstance(
            self._data, (PeriodArray, PeriodIndex)
        ):
            # Period data renders its freq with period-style aliases, which
            # can differ from the offset's own name.
            freq = freq_to_period_freqstr(self._data.freq.n, self._data.freq.name)
            return freq
        else:
            return self._data.freqstr  # type: ignore[return-value]

    @cache_readonly
    @abstractmethod
    def _resolution_obj(self) -> Resolution:
        # Subclasses supply the Resolution used to decide whether a parsed
        # string can be treated as a partial-date slice.
        ...

    @cache_readonly
    @doc(DatetimeLikeArrayMixin.resolution)
    def resolution(self) -> str:
        return self._data.resolution

    # ------------------------------------------------------------------------

    @cache_readonly
    def hasnans(self) -> bool:
        # True if any entry is NaT.
        return self._data._hasna

    def equals(self, other: Any) -> bool:
        """
        Determines if two Index objects contain the same elements.
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False
        elif other.dtype.kind in "iufc":
            # Numeric dtypes can never match datetimelike values.
            return False
        elif not isinstance(other, type(self)):
            # Mismatched Index subclass: only worth attempting a conversion
            # when the other index plausibly holds datetimelike values.
            should_try = False
            inferable = self._data._infer_matches
            if other.dtype == object:
                should_try = other.inferred_type in inferable
            elif isinstance(other.dtype, CategoricalDtype):
                other = cast("CategoricalIndex", other)
                should_try = other.categories.inferred_type in inferable

            if should_try:
                try:
                    other = type(self)(other)
                except (ValueError, TypeError, OverflowError):
                    # e.g.
                    #  ValueError -> cannot parse str entry, or OutOfBoundsDatetime
                    #  TypeError -> trying to convert IntervalIndex to DatetimeIndex
                    #  OverflowError -> Index([very_large_timedeltas])
                    return False

        if self.dtype != other.dtype:
            # have different timezone
            return False

        # Same dtype: element-wise comparison on the int64 representation.
        return np.array_equal(self.asi8, other.asi8)

    @Appender(Index.__contains__.__doc__)
    def __contains__(self, key: Any) -> bool:
        # hash() first so unhashable keys raise TypeError, matching dict/set
        # semantics, instead of being swallowed below.
        hash(key)
        try:
            self.get_loc(key)
        except (KeyError, TypeError, ValueError, InvalidIndexError):
            return False
        return True

    def _convert_tolerance(self, tolerance, target):
        # Tolerances for datetimelike indexes are interpreted as timedeltas.
        tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
        return super()._convert_tolerance(tolerance, target)

    # --------------------------------------------------------------------
    # Rendering Methods
    _default_na_rep = "NaT"

    def format(
        self,
        name: bool = False,
        formatter: Callable | None = None,
        na_rep: str = "NaT",
        date_format: str | None = None,
    ) -> list[str]:
        """
        Render a string representation of the Index.

        .. deprecated::
            Use ``index.astype(str)`` or ``index.map(formatter)`` instead.
        """
        warnings.warn(
            # GH#55413
            f"{type(self).__name__}.format is deprecated and will be removed "
            "in a future version. Convert using index.astype(str) or "
            "index.map(formatter) instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        header = []
        if name:
            header.append(
                ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
                if self.name is not None
                else ""
            )

        if formatter is not None:
            return header + list(self.map(formatter))

        return self._format_with_header(
            header=header, na_rep=na_rep, date_format=date_format
        )

    def _format_with_header(
        self, *, header: list[str], na_rep: str, date_format: str | None = None
    ) -> list[str]:
        # TODO: not reached in tests 2023-10-11
        # matches base class except for whitespace padding and date_format
        return header + list(
            self._get_values_for_csv(na_rep=na_rep, date_format=date_format)
        )

    @property
    def _formatter_func(self):
        # Scalar formatter supplied by the backing array.
        return self._data._formatter()

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value).
        """
        attrs = super()._format_attrs()
        for attrib in self._attributes:
            # iterating over _attributes prevents us from doing this for PeriodIndex
            if attrib == "freq":
                freq = self.freqstr
                if freq is not None:
                    freq = repr(freq)  # e.g. D -> 'D'
                attrs.append(("freq", freq))
        return attrs

    @Appender(Index._summary.__doc__)
    def _summary(self, name=None) -> str:
        result = super()._summary(name=name)
        if self.freq:
            result += f"\nFreq: {self.freqstr}"

        return result

    # --------------------------------------------------------------------
    # Indexing Methods

    @final
    def _can_partial_date_slice(self, reso: Resolution) -> bool:
        # e.g. test_getitem_setitem_periodindex
        # History of conversation GH#3452, GH#3931, GH#2369, GH#14826
        # A parsed string coarser than the index resolution selects a range.
        return reso > self._resolution_obj
        # NB: for DTI/PI, not TDI

    def _parsed_string_to_bounds(self, reso: Resolution, parsed):
        # Implemented by subclasses: (lower, upper) bounds covered by the
        # parsed string at the given resolution.
        raise NotImplementedError

    def _parse_with_reso(self, label: str):
        """Parse ``label`` to (parsed_scalar, Resolution) for this index."""
        # overridden by TimedeltaIndex
        try:
            if self.freq is None or hasattr(self.freq, "rule_code"):
                freq = self.freq
        except NotImplementedError:
            freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
        # NOTE(review): if ``self.freq`` is non-None but lacks ``rule_code``
        # and does not raise, ``freq`` would be unbound below — presumably all
        # BaseOffset subclasses define ``rule_code``; confirm.

        freqstr: str | None
        if freq is not None and not isinstance(freq, str):
            freqstr = freq.rule_code
        else:
            freqstr = freq

        if isinstance(label, np.str_):
            # GH#45580
            label = str(label)

        parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr)
        reso = Resolution.from_attrname(reso_str)
        return parsed, reso

    def _get_string_slice(self, key: str):
        # overridden by TimedeltaIndex
        parsed, reso = self._parse_with_reso(key)
        try:
            return self._partial_date_slice(reso, parsed)
        except KeyError as err:
            # Re-raise with the original string so error messages show the
            # user's key, not the parsed scalar.
            raise KeyError(key) from err

    @final
    def _partial_date_slice(
        self,
        reso: Resolution,
        parsed: datetime,
    ) -> slice | npt.NDArray[np.intp]:
        """
        Select the positions covered by a partially-specified datetime string.

        Parameters
        ----------
        reso : Resolution
        parsed : datetime

        Returns
        -------
        slice or ndarray[intp]
            A slice when the index is monotonic increasing, otherwise the
            integer positions of matching values.

        Raises
        ------
        ValueError
            If ``reso`` is not coarser than the index resolution.
        KeyError
            If the parsed range lies entirely outside a monotonic index.
        """
        if not self._can_partial_date_slice(reso):
            raise ValueError

        t1, t2 = self._parsed_string_to_bounds(reso, parsed)
        vals = self._data._ndarray
        unbox = self._data._unbox

        if self.is_monotonic_increasing:
            if len(self) and (
                (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
            ):
                # we are out of range
                raise KeyError

            # TODO: does this depend on being monotonic _increasing_?

            # a monotonic (sorted) series can be sliced
            left = vals.searchsorted(unbox(t1), side="left")
            right = vals.searchsorted(unbox(t2), side="right")
            return slice(left, right)

        else:
            lhs_mask = vals >= unbox(t1)
            rhs_mask = vals <= unbox(t2)

            # try to find the dates
            return (lhs_mask & rhs_mask).nonzero()[0]

    def _maybe_cast_slice_bound(self, label, side: str):
        """
        If label is a string, cast it to scalar type according to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        if isinstance(label, str):
            try:
                parsed, reso = self._parse_with_reso(label)
            except ValueError as err:
                # DTI -> parsing.DateParseError
                # TDI -> 'unit abbreviation w/o a number'
                # PI -> string cannot be parsed as datetime-like
                self._raise_invalid_indexer("slice", label, err)

            lower, upper = self._parsed_string_to_bounds(reso, parsed)
            # A coarse label covers a range: the left bound uses its start,
            # the right bound its end.
            return lower if side == "left" else upper
        elif not isinstance(label, self._data._recognized_scalars):
            self._raise_invalid_indexer("slice", label)

        return label

    # --------------------------------------------------------------------
    # Arithmetic Methods

    def shift(self, periods: int = 1, freq=None) -> Self:
        """
        Shift index by desired number of time frequency increments.

        This method is for shifting the values of datetime-like indexes
        by a specified time increment a given number of times.

        Parameters
        ----------
        periods : int, default 1
            Number of periods (or increments) to shift by,
            can be positive or negative.
        freq : pandas.DateOffset, pandas.Timedelta or string, optional
            Frequency increment to shift by.
            If None, the index is shifted by its own `freq` attribute.
            Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.

        Returns
        -------
        pandas.DatetimeIndex
            Shifted index.

        See Also
        --------
        Index.shift : Shift values of Index.
        PeriodIndex.shift : Shift values of PeriodIndex.
        """
        raise NotImplementedError

    # --------------------------------------------------------------------

    @doc(Index._maybe_cast_listlike_indexer)
    def _maybe_cast_listlike_indexer(self, keyarr):
        try:
            res = self._data._validate_listlike(keyarr, allow_object=True)
        except (ValueError, TypeError):
            if not isinstance(keyarr, ExtensionArray):
                # e.g. we don't want to cast DTA to ndarray[object]
                res = com.asarray_tuplesafe(keyarr)
                # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
            else:
                res = keyarr
        return Index(res, dtype=res.dtype)
428
+
429
+
430
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC):
    """
    Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
    but not PeriodIndex

    Adds unit handling, freq-preserving set operations, joins and
    insert/delete on top of DatetimeIndexOpsMixin.
    """

    _data: DatetimeArray | TimedeltaArray
    _comparables = ["name", "freq"]
    _attributes = ["name", "freq"]

    # Compat for frequency inference, see GH#23789
    _is_monotonic_increasing = Index.is_monotonic_increasing
    _is_monotonic_decreasing = Index.is_monotonic_decreasing
    _is_unique = Index.is_unique

    @property
    def unit(self) -> str:
        # Time resolution of the underlying dtype, e.g. "ns".
        return self._data.unit

    def as_unit(self, unit: str) -> Self:
        """
        Convert to a dtype with the given unit resolution.

        Parameters
        ----------
        unit : {'s', 'ms', 'us', 'ns'}

        Returns
        -------
        same type as self

        Examples
        --------
        For :class:`pandas.DatetimeIndex`:

        >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006'])
        >>> idx
        DatetimeIndex(['2020-01-02 01:02:03.004005006'],
                      dtype='datetime64[ns]', freq=None)
        >>> idx.as_unit('s')
        DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)

        For :class:`pandas.TimedeltaIndex`:

        >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
        >>> tdelta_idx
        TimedeltaIndex(['1 days 00:03:00.000002042'],
                        dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.as_unit('s')
        TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
        """
        arr = self._data.as_unit(unit)
        return type(self)._simple_new(arr, name=self.name)

    def _with_freq(self, freq):
        # Return a copy of self whose array carries the given freq
        # (may be None or "infer").
        arr = self._data._with_freq(freq)
        return type(self)._simple_new(arr, name=self._name)

    @property
    def values(self) -> np.ndarray:
        # NB: For Datetime64TZ this is lossy
        data = self._data._ndarray
        if using_copy_on_write():
            # Under CoW, hand out a read-only view so callers cannot mutate
            # the index's backing data.
            data = data.view()
            data.flags.writeable = False
        return data

    @doc(DatetimeIndexOpsMixin.shift)
    def shift(self, periods: int = 1, freq=None) -> Self:
        if freq is not None and freq != self.freq:
            # Shifting by an explicit freq is plain offset arithmetic.
            if isinstance(freq, str):
                freq = to_offset(freq)
            offset = periods * freq
            return self + offset

        if periods == 0 or len(self) == 0:
            # GH#14811 empty case
            return self.copy()

        if self.freq is None:
            raise NullFrequencyError("Cannot shift with no freq")

        start = self[0] + periods * self.freq
        end = self[-1] + periods * self.freq

        # Note: in the DatetimeTZ case, _generate_range will infer the
        #  appropriate timezone from `start` and `end`, so tz does not need
        #  to be passed explicitly.
        result = self._data._generate_range(
            start=start, end=end, periods=None, freq=self.freq, unit=self.unit
        )
        return type(self)._simple_new(result, name=self.name)

    @cache_readonly
    @doc(DatetimeLikeArrayMixin.inferred_freq)
    def inferred_freq(self) -> str | None:
        return self._data.inferred_freq

    # --------------------------------------------------------------------
    # Set Operation Methods

    @cache_readonly
    def _as_range_index(self) -> RangeIndex:
        # Convert our i8 representations to RangeIndex
        # Caller is responsible for checking isinstance(self.freq, Tick)
        freq = cast(Tick, self.freq)
        tick = Timedelta(freq).as_unit("ns")._value
        rng = range(self[0]._value, self[-1]._value + tick, tick)
        return RangeIndex(rng)

    def _can_range_setop(self, other) -> bool:
        # Tick (fixed-size) freqs on both sides allow RangeIndex set ops.
        return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)

    def _wrap_range_setop(self, other, res_i8) -> Self:
        """Wrap an i8-space set-op result back into an index of our type."""
        new_freq = None
        if not len(res_i8):
            # RangeIndex defaults to step=1, which we don't want.
            new_freq = self.freq
        elif isinstance(res_i8, RangeIndex):
            new_freq = to_offset(Timedelta(res_i8.step))

        # TODO(GH#41493): we cannot just do
        #  type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
        #  because test_setops_preserve_freq fails with _validate_frequency raising.
        #  This raising is incorrect, as 'on_freq' is incorrect. This will
        #  be fixed by GH#41493
        res_values = res_i8.values.view(self._data._ndarray.dtype)
        result = type(self._data)._simple_new(
            # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
            # incompatible type "Union[dtype[Any], ExtensionDtype]"; expected
            # "Union[dtype[datetime64], DatetimeTZDtype]"
            res_values,
            dtype=self.dtype,  # type: ignore[arg-type]
            freq=new_freq,  # type: ignore[arg-type]
        )
        return cast("Self", self._wrap_setop_result(other, result))

    def _range_intersect(self, other, sort) -> Self:
        # Dispatch to RangeIndex intersection logic.
        left = self._as_range_index
        right = other._as_range_index
        res_i8 = left.intersection(right, sort=sort)
        return self._wrap_range_setop(other, res_i8)

    def _range_union(self, other, sort) -> Self:
        # Dispatch to RangeIndex union logic.
        left = self._as_range_index
        right = other._as_range_index
        res_i8 = left.union(right, sort=sort)
        return self._wrap_range_setop(other, res_i8)

    def _intersection(self, other: Index, sort: bool = False) -> Index:
        """
        intersection specialized to the case with matching dtypes and both non-empty.
        """
        other = cast("DatetimeTimedeltaMixin", other)

        if self._can_range_setop(other):
            return self._range_intersect(other, sort=sort)

        if not self._can_fast_intersect(other):
            result = Index._intersection(self, other, sort=sort)
            # We need to invalidate the freq because Index._intersection
            #  uses _shallow_copy on a view of self._data, which will preserve
            #  self.freq if we're not careful.
            # At this point we should have result.dtype == self.dtype
            #  and type(result) is type(self._data)
            result = self._wrap_setop_result(other, result)
            return result._with_freq(None)._with_freq("infer")

        else:
            return self._fast_intersect(other, sort)

    def _fast_intersect(self, other, sort):
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self

        # after sorting, the intersection always starts with the right index
        # and ends with the index of which the last elements is smallest
        end = min(left[-1], right[-1])
        start = right[0]

        if end < start:
            # Disjoint ranges: empty result.
            result = self[:0]
        else:
            lslice = slice(*left.slice_locs(start, end))
            result = left._values[lslice]

        return result

    def _can_fast_intersect(self, other: Self) -> bool:
        # Note: we only get here with len(self) > 0 and len(other) > 0
        if self.freq is None:
            return False

        elif other.freq != self.freq:
            return False

        elif not self.is_monotonic_increasing:
            # Because freq is not None, we must then be monotonic decreasing
            return False

        # this along with matching freqs ensure that we "line up",
        #  so intersection will preserve freq
        # Note we are assuming away Ticks, as those go through _range_intersect
        # GH#42104
        return self.freq.n == 1

    def _can_fast_union(self, other: Self) -> bool:
        # Assumes that type(self) == type(other), as per the annotation
        # The ability to fast_union also implies that `freq` should be
        #  retained on union.
        freq = self.freq

        if freq is None or freq != other.freq:
            return False

        if not self.is_monotonic_increasing:
            # Because freq is not None, we must then be monotonic decreasing
            # TODO: do union on the reversed indexes?
            return False

        if len(self) == 0 or len(other) == 0:
            # only reached via union_many
            return True

        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self

        right_start = right[0]
        left_end = left[-1]

        # Only need to "adjoin", not overlap
        return (right_start == left_end + freq) or right_start in left

    def _fast_union(self, other: Self, sort=None) -> Self:
        # Caller is responsible for ensuring self and other are non-empty

        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        elif sort is False:
            # TDIs are not in the "correct" order and we don't want
            #  to sort but want to remove overlaps
            left, right = self, other
            left_start = left[0]
            loc = right.searchsorted(left_start, side="left")
            right_chunk = right._values[:loc]
            dates = concat_compat((left._values, right_chunk))
            result = type(self)._simple_new(dates, name=self.name)
            return result
        else:
            left, right = other, self

        left_end = left[-1]
        right_end = right[-1]

        # concatenate
        if left_end < right_end:
            loc = right.searchsorted(left_end, side="right")
            right_chunk = right._values[loc:]
            dates = concat_compat([left._values, right_chunk])
            # The can_fast_union check ensures that the result.freq
            #  should match self.freq
            assert isinstance(dates, type(self._data))
            # error: Item "ExtensionArray" of "ExtensionArray |
            #  ndarray[Any, Any]" has no attribute "_freq"
            assert dates._freq == self.freq  # type: ignore[union-attr]
            result = type(self)._simple_new(dates)
            return result
        else:
            # `right` is entirely contained in `left`.
            return left

    def _union(self, other, sort):
        # We are called by `union`, which is responsible for this validation
        assert isinstance(other, type(self))
        assert self.dtype == other.dtype

        if self._can_range_setop(other):
            return self._range_union(other, sort=sort)

        if self._can_fast_union(other):
            result = self._fast_union(other, sort=sort)
            # in the case with sort=None, the _can_fast_union check ensures
            #  that result.freq == self.freq
            return result
        else:
            return super()._union(other, sort)._with_freq("infer")

    # --------------------------------------------------------------------
    # Join Methods

    def _get_join_freq(self, other):
        """
        Get the freq to attach to the result of a join operation.
        """
        freq = None
        if self._can_fast_union(other):
            freq = self.freq
        return freq

    def _wrap_joined_index(
        self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp]
    ):
        assert other.dtype == self.dtype, (other.dtype, self.dtype)
        result = super()._wrap_joined_index(joined, other, lidx, ridx)
        # Preserve freq only when the join could have preserved it.
        result._data._freq = self._get_join_freq(other)
        return result

    def _get_engine_target(self) -> np.ndarray:
        # engine methods and libjoin methods need dt64/td64 values cast to i8
        return self._data._ndarray.view("i8")

    def _from_join_target(self, result: np.ndarray):
        # view e.g. i8 back to M8[ns]
        result = result.view(self._data._ndarray.dtype)
        return self._data._from_backing_data(result)

    # --------------------------------------------------------------------
    # List-like Methods

    def _get_delete_freq(self, loc: int | slice | Sequence[int]):
        """
        Find the `freq` for self.delete(loc).
        """
        freq = None
        if self.freq is not None:
            if is_integer(loc):
                # Deleting an endpoint keeps the remaining values on-freq.
                if loc in (0, -len(self), -1, len(self) - 1):
                    freq = self.freq
            else:
                if is_list_like(loc):
                    # error: Incompatible types in assignment (expression has
                    #  type "Union[slice, ndarray]", variable has type
                    #  "Union[int, slice, Sequence[int]]")
                    loc = lib.maybe_indices_to_slice(  # type: ignore[assignment]
                        np.asarray(loc, dtype=np.intp), len(self)
                    )
                if isinstance(loc, slice) and loc.step in (1, None):
                    # A contiguous run touching either end preserves freq.
                    if loc.start in (0, None) or loc.stop in (len(self), None):
                        freq = self.freq
        return freq

    def _get_insert_freq(self, loc: int, item):
        """
        Find the `freq` for self.insert(loc, item).
        """
        value = self._data._validate_scalar(item)
        item = self._data._box_func(value)

        freq = None
        if self.freq is not None:
            # freq can be preserved on edge cases
            if self.size:
                if item is NaT:
                    pass
                elif loc in (0, -len(self)) and item + self.freq == self[0]:
                    # Prepending the value one step before the start.
                    freq = self.freq
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    # Appending the value one step after the end.
                    freq = self.freq
            else:
                # Adding a single item to an empty index may preserve freq
                if isinstance(self.freq, Tick):
                    # all TimedeltaIndex cases go through here; is_on_offset
                    #  would raise TypeError
                    freq = self.freq
                elif self.freq.is_on_offset(item):
                    freq = self.freq
        return freq

    @doc(NDArrayBackedExtensionIndex.delete)
    def delete(self, loc) -> Self:
        result = super().delete(loc)
        result._data._freq = self._get_delete_freq(loc)
        return result

    @doc(NDArrayBackedExtensionIndex.insert)
    def insert(self, loc: int, item):
        result = super().insert(loc, item)
        if isinstance(result, type(self)):
            # i.e. parent class method did not cast
            result._data._freq = self._get_insert_freq(loc, item)
        return result

    # --------------------------------------------------------------------
    # NDArray-Like Methods

    @Appender(_index_shared_docs["take"] % _index_doc_kwargs)
    def take(
        self,
        indices,
        axis: Axis = 0,
        allow_fill: bool = True,
        fill_value=None,
        **kwargs,
    ) -> Self:
        nv.validate_take((), kwargs)
        indices = np.asarray(indices, dtype=np.intp)

        result = NDArrayBackedExtensionIndex.take(
            self, indices, axis, allow_fill, fill_value, **kwargs
        )

        # If the fancy indices are equivalent to a slice, the freq can be
        # recovered from the slice semantics.
        maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
        if isinstance(maybe_slice, slice):
            freq = self._data._get_getitem_freq(maybe_slice)
            result._data._freq = freq
        return result
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/datetimes.py ADDED
@@ -0,0 +1,1127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import datetime as dt
4
+ import operator
5
+ from typing import TYPE_CHECKING
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytz
10
+
11
+ from pandas._libs import (
12
+ NaT,
13
+ Period,
14
+ Timestamp,
15
+ index as libindex,
16
+ lib,
17
+ )
18
+ from pandas._libs.tslibs import (
19
+ Resolution,
20
+ Tick,
21
+ Timedelta,
22
+ periods_per_day,
23
+ timezones,
24
+ to_offset,
25
+ )
26
+ from pandas._libs.tslibs.offsets import prefix_mapping
27
+ from pandas.util._decorators import (
28
+ cache_readonly,
29
+ doc,
30
+ )
31
+ from pandas.util._exceptions import find_stack_level
32
+
33
+ from pandas.core.dtypes.common import is_scalar
34
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
35
+ from pandas.core.dtypes.generic import ABCSeries
36
+ from pandas.core.dtypes.missing import is_valid_na_for_dtype
37
+
38
+ from pandas.core.arrays.datetimes import (
39
+ DatetimeArray,
40
+ tz_to_dtype,
41
+ )
42
+ import pandas.core.common as com
43
+ from pandas.core.indexes.base import (
44
+ Index,
45
+ maybe_extract_name,
46
+ )
47
+ from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
48
+ from pandas.core.indexes.extension import inherit_names
49
+ from pandas.core.tools.times import to_time
50
+
51
+ if TYPE_CHECKING:
52
+ from collections.abc import Hashable
53
+
54
+ from pandas._typing import (
55
+ Dtype,
56
+ DtypeObj,
57
+ Frequency,
58
+ IntervalClosedType,
59
+ Self,
60
+ TimeAmbiguous,
61
+ TimeNonexistent,
62
+ npt,
63
+ )
64
+
65
+ from pandas.core.api import (
66
+ DataFrame,
67
+ PeriodIndex,
68
+ )
69
+
70
+ from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR
71
+
72
+
73
def _new_DatetimeIndex(cls, d):
    """
    This is called upon unpickling, rather than the default which doesn't
    have arguments and breaks __new__

    Parameters
    ----------
    cls : type
        The DatetimeIndex (sub)class being reconstructed.
    d : dict
        Pickled state; mutated in place (keys are popped) while rebuilding.

    Returns
    -------
    DatetimeIndex
    """
    if "data" in d and not isinstance(d["data"], DatetimeIndex):
        # Avoid need to verify integrity by calling simple_new directly
        data = d.pop("data")
        if not isinstance(data, DatetimeArray):
            # For backward compat with older pickles, we may need to construct
            # a DatetimeArray to adapt to the newer _simple_new signature
            tz = d.pop("tz")
            freq = d.pop("freq")
            dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
        else:
            dta = data
            for key in ["tz", "freq"]:
                # These are already stored in our DatetimeArray; if they are
                # also in the pickle and don't match, we have a problem.
                if key in d:
                    assert d[key] == getattr(dta, key)
                    d.pop(key)
        result = cls._simple_new(dta, **d)
    else:
        with warnings.catch_warnings():
            # TODO: If we knew what was going in to **d, we might be able to
            # go through _simple_new instead
            warnings.simplefilter("ignore")
            result = cls.__new__(cls, **d)

    return result
104
+
105
+
106
+ @inherit_names(
107
+ DatetimeArray._field_ops
108
+ + [
109
+ method
110
+ for method in DatetimeArray._datetimelike_methods
111
+ if method not in ("tz_localize", "tz_convert", "strftime")
112
+ ],
113
+ DatetimeArray,
114
+ wrap=True,
115
+ )
116
+ @inherit_names(["is_normalized"], DatetimeArray, cache=True)
117
+ @inherit_names(
118
+ [
119
+ "tz",
120
+ "tzinfo",
121
+ "dtype",
122
+ "to_pydatetime",
123
+ "date",
124
+ "time",
125
+ "timetz",
126
+ "std",
127
+ ]
128
+ + DatetimeArray._bool_ops,
129
+ DatetimeArray,
130
+ )
131
+ class DatetimeIndex(DatetimeTimedeltaMixin):
132
+ """
133
+ Immutable ndarray-like of datetime64 data.
134
+
135
+ Represented internally as int64, and which can be boxed to Timestamp objects
136
+ that are subclasses of datetime and carry metadata.
137
+
138
+ .. versionchanged:: 2.0.0
139
+ The various numeric date/time attributes (:attr:`~DatetimeIndex.day`,
140
+ :attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype
141
+ ``int32``. Previously they had dtype ``int64``.
142
+
143
+ Parameters
144
+ ----------
145
+ data : array-like (1-dimensional)
146
+ Datetime-like data to construct index with.
147
+ freq : str or pandas offset object, optional
148
+ One of pandas date offset strings or corresponding objects. The string
149
+ 'infer' can be passed in order to set the frequency of the index as the
150
+ inferred frequency upon creation.
151
+ tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
152
+ Set the Timezone of the data.
153
+ normalize : bool, default False
154
+ Normalize start/end dates to midnight before generating date range.
155
+
156
+ .. deprecated:: 2.1.0
157
+
158
+ closed : {'left', 'right'}, optional
159
+ Set whether to include `start` and `end` that are on the
160
+ boundary. The default includes boundary points on either end.
161
+
162
+ .. deprecated:: 2.1.0
163
+
164
+ ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
165
+ When clocks moved backward due to DST, ambiguous times may arise.
166
+ For example in Central European Time (UTC+01), when going from 03:00
167
+ DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
168
+ and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
169
+ dictates how ambiguous times should be handled.
170
+
171
+ - 'infer' will attempt to infer fall dst-transition hours based on
172
+ order
173
+ - bool-ndarray where True signifies a DST time, False signifies a
174
+ non-DST time (note that this flag is only applicable for ambiguous
175
+ times)
176
+ - 'NaT' will return NaT where there are ambiguous times
177
+ - 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
178
+ dayfirst : bool, default False
179
+ If True, parse dates in `data` with the day first order.
180
+ yearfirst : bool, default False
181
+ If True parse dates in `data` with the year first order.
182
+ dtype : numpy.dtype or DatetimeTZDtype or str, default None
183
+ Note that the only NumPy dtype allowed is `datetime64[ns]`.
184
+ copy : bool, default False
185
+ Make a copy of input ndarray.
186
+ name : label, default None
187
+ Name to be stored in the index.
188
+
189
+ Attributes
190
+ ----------
191
+ year
192
+ month
193
+ day
194
+ hour
195
+ minute
196
+ second
197
+ microsecond
198
+ nanosecond
199
+ date
200
+ time
201
+ timetz
202
+ dayofyear
203
+ day_of_year
204
+ dayofweek
205
+ day_of_week
206
+ weekday
207
+ quarter
208
+ tz
209
+ freq
210
+ freqstr
211
+ is_month_start
212
+ is_month_end
213
+ is_quarter_start
214
+ is_quarter_end
215
+ is_year_start
216
+ is_year_end
217
+ is_leap_year
218
+ inferred_freq
219
+
220
+ Methods
221
+ -------
222
+ normalize
223
+ strftime
224
+ snap
225
+ tz_convert
226
+ tz_localize
227
+ round
228
+ floor
229
+ ceil
230
+ to_period
231
+ to_pydatetime
232
+ to_series
233
+ to_frame
234
+ month_name
235
+ day_name
236
+ mean
237
+ std
238
+
239
+ See Also
240
+ --------
241
+ Index : The base pandas Index type.
242
+ TimedeltaIndex : Index of timedelta64 data.
243
+ PeriodIndex : Index of Period data.
244
+ to_datetime : Convert argument to datetime.
245
+ date_range : Create a fixed-frequency DatetimeIndex.
246
+
247
+ Notes
248
+ -----
249
+ To learn more about the frequency strings, please see `this link
250
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
251
+
252
+ Examples
253
+ --------
254
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
255
+ >>> idx
256
+ DatetimeIndex(['2020-01-01 10:00:00+00:00', '2020-02-01 11:00:00+00:00'],
257
+ dtype='datetime64[ns, UTC]', freq=None)
258
+ """
259
+
260
+ _typ = "datetimeindex"
261
+
262
+ _data_cls = DatetimeArray
263
+ _supports_partial_string_indexing = True
264
+
265
+ @property
266
+ def _engine_type(self) -> type[libindex.DatetimeEngine]:
267
+ return libindex.DatetimeEngine
268
+
269
+ _data: DatetimeArray
270
+ _values: DatetimeArray
271
+ tz: dt.tzinfo | None
272
+
273
+ # --------------------------------------------------------------------
274
+ # methods that dispatch to DatetimeArray and wrap result
275
+
276
+ @doc(DatetimeArray.strftime)
277
+ def strftime(self, date_format) -> Index:
278
+ arr = self._data.strftime(date_format)
279
+ return Index(arr, name=self.name, dtype=object)
280
+
281
+ @doc(DatetimeArray.tz_convert)
282
+ def tz_convert(self, tz) -> Self:
283
+ arr = self._data.tz_convert(tz)
284
+ return type(self)._simple_new(arr, name=self.name, refs=self._references)
285
+
286
+ @doc(DatetimeArray.tz_localize)
287
+ def tz_localize(
288
+ self,
289
+ tz,
290
+ ambiguous: TimeAmbiguous = "raise",
291
+ nonexistent: TimeNonexistent = "raise",
292
+ ) -> Self:
293
+ arr = self._data.tz_localize(tz, ambiguous, nonexistent)
294
+ return type(self)._simple_new(arr, name=self.name)
295
+
296
+ @doc(DatetimeArray.to_period)
297
+ def to_period(self, freq=None) -> PeriodIndex:
298
+ from pandas.core.indexes.api import PeriodIndex
299
+
300
+ arr = self._data.to_period(freq)
301
+ return PeriodIndex._simple_new(arr, name=self.name)
302
+
303
+ @doc(DatetimeArray.to_julian_date)
304
+ def to_julian_date(self) -> Index:
305
+ arr = self._data.to_julian_date()
306
+ return Index._simple_new(arr, name=self.name)
307
+
308
+ @doc(DatetimeArray.isocalendar)
309
+ def isocalendar(self) -> DataFrame:
310
+ df = self._data.isocalendar()
311
+ return df.set_index(self)
312
+
313
+ @cache_readonly
314
+ def _resolution_obj(self) -> Resolution:
315
+ return self._data._resolution_obj
316
+
317
+ # --------------------------------------------------------------------
318
+ # Constructors
319
+
320
+ def __new__(
321
+ cls,
322
+ data=None,
323
+ freq: Frequency | lib.NoDefault = lib.no_default,
324
+ tz=lib.no_default,
325
+ normalize: bool | lib.NoDefault = lib.no_default,
326
+ closed=lib.no_default,
327
+ ambiguous: TimeAmbiguous = "raise",
328
+ dayfirst: bool = False,
329
+ yearfirst: bool = False,
330
+ dtype: Dtype | None = None,
331
+ copy: bool = False,
332
+ name: Hashable | None = None,
333
+ ) -> Self:
334
+ if closed is not lib.no_default:
335
+ # GH#52628
336
+ warnings.warn(
337
+ f"The 'closed' keyword in {cls.__name__} construction is "
338
+ "deprecated and will be removed in a future version.",
339
+ FutureWarning,
340
+ stacklevel=find_stack_level(),
341
+ )
342
+ if normalize is not lib.no_default:
343
+ # GH#52628
344
+ warnings.warn(
345
+ f"The 'normalize' keyword in {cls.__name__} construction is "
346
+ "deprecated and will be removed in a future version.",
347
+ FutureWarning,
348
+ stacklevel=find_stack_level(),
349
+ )
350
+
351
+ if is_scalar(data):
352
+ cls._raise_scalar_data_error(data)
353
+
354
+ # - Cases checked above all return/raise before reaching here - #
355
+
356
+ name = maybe_extract_name(name, data, cls)
357
+
358
+ if (
359
+ isinstance(data, DatetimeArray)
360
+ and freq is lib.no_default
361
+ and tz is lib.no_default
362
+ and dtype is None
363
+ ):
364
+ # fastpath, similar logic in TimedeltaIndex.__new__;
365
+ # Note in this particular case we retain non-nano.
366
+ if copy:
367
+ data = data.copy()
368
+ return cls._simple_new(data, name=name)
369
+
370
+ dtarr = DatetimeArray._from_sequence_not_strict(
371
+ data,
372
+ dtype=dtype,
373
+ copy=copy,
374
+ tz=tz,
375
+ freq=freq,
376
+ dayfirst=dayfirst,
377
+ yearfirst=yearfirst,
378
+ ambiguous=ambiguous,
379
+ )
380
+ refs = None
381
+ if not copy and isinstance(data, (Index, ABCSeries)):
382
+ refs = data._references
383
+
384
+ subarr = cls._simple_new(dtarr, name=name, refs=refs)
385
+ return subarr
386
+
387
+ # --------------------------------------------------------------------
388
+
389
+ @cache_readonly
390
+ def _is_dates_only(self) -> bool:
391
+ """
392
+ Return a boolean if we are only dates (and don't have a timezone)
393
+
394
+ Returns
395
+ -------
396
+ bool
397
+ """
398
+ if isinstance(self.freq, Tick):
399
+ delta = Timedelta(self.freq)
400
+
401
+ if delta % dt.timedelta(days=1) != dt.timedelta(days=0):
402
+ return False
403
+
404
+ return self._values._is_dates_only
405
+
406
+ def __reduce__(self):
407
+ d = {"data": self._data, "name": self.name}
408
+ return _new_DatetimeIndex, (type(self), d), None
409
+
410
+ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
411
+ """
412
+ Can we compare values of the given dtype to our own?
413
+ """
414
+ if self.tz is not None:
415
+ # If we have tz, we can compare to tzaware
416
+ return isinstance(dtype, DatetimeTZDtype)
417
+ # if we dont have tz, we can only compare to tznaive
418
+ return lib.is_np_dtype(dtype, "M")
419
+
420
+ # --------------------------------------------------------------------
421
+ # Rendering Methods
422
+
423
+ @cache_readonly
424
+ def _formatter_func(self):
425
+ # Note this is equivalent to the DatetimeIndexOpsMixin method but
426
+ # uses the maybe-cached self._is_dates_only instead of re-computing it.
427
+ from pandas.io.formats.format import get_format_datetime64
428
+
429
+ formatter = get_format_datetime64(is_dates_only=self._is_dates_only)
430
+ return lambda x: f"'{formatter(x)}'"
431
+
432
+ # --------------------------------------------------------------------
433
+ # Set Operation Methods
434
+
435
+ def _can_range_setop(self, other) -> bool:
436
+ # GH 46702: If self or other have non-UTC tzs, DST transitions prevent
437
+ # range representation due to no singular step
438
+ if (
439
+ self.tz is not None
440
+ and not timezones.is_utc(self.tz)
441
+ and not timezones.is_fixed_offset(self.tz)
442
+ ):
443
+ return False
444
+ if (
445
+ other.tz is not None
446
+ and not timezones.is_utc(other.tz)
447
+ and not timezones.is_fixed_offset(other.tz)
448
+ ):
449
+ return False
450
+ return super()._can_range_setop(other)
451
+
452
+ # --------------------------------------------------------------------
453
+
454
+ def _get_time_micros(self) -> npt.NDArray[np.int64]:
455
+ """
456
+ Return the number of microseconds since midnight.
457
+
458
+ Returns
459
+ -------
460
+ ndarray[int64_t]
461
+ """
462
+ values = self._data._local_timestamps()
463
+
464
+ ppd = periods_per_day(self._data._creso)
465
+
466
+ frac = values % ppd
467
+ if self.unit == "ns":
468
+ micros = frac // 1000
469
+ elif self.unit == "us":
470
+ micros = frac
471
+ elif self.unit == "ms":
472
+ micros = frac * 1000
473
+ elif self.unit == "s":
474
+ micros = frac * 1_000_000
475
+ else: # pragma: no cover
476
+ raise NotImplementedError(self.unit)
477
+
478
+ micros[self._isnan] = -1
479
+ return micros
480
+
481
+ def snap(self, freq: Frequency = "S") -> DatetimeIndex:
482
+ """
483
+ Snap time stamps to nearest occurring frequency.
484
+
485
+ Returns
486
+ -------
487
+ DatetimeIndex
488
+
489
+ Examples
490
+ --------
491
+ >>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02',
492
+ ... '2023-02-01', '2023-02-02'])
493
+ >>> idx
494
+ DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'],
495
+ dtype='datetime64[ns]', freq=None)
496
+ >>> idx.snap('MS')
497
+ DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],
498
+ dtype='datetime64[ns]', freq=None)
499
+ """
500
+ # Superdumb, punting on any optimizing
501
+ freq = to_offset(freq)
502
+
503
+ dta = self._data.copy()
504
+
505
+ for i, v in enumerate(self):
506
+ s = v
507
+ if not freq.is_on_offset(s):
508
+ t0 = freq.rollback(s)
509
+ t1 = freq.rollforward(s)
510
+ if abs(s - t0) < abs(t1 - s):
511
+ s = t0
512
+ else:
513
+ s = t1
514
+ dta[i] = s
515
+
516
+ return DatetimeIndex._simple_new(dta, name=self.name)
517
+
518
+ # --------------------------------------------------------------------
519
+ # Indexing Methods
520
+
521
+ def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime):
522
+ """
523
+ Calculate datetime bounds for parsed time string and its resolution.
524
+
525
+ Parameters
526
+ ----------
527
+ reso : Resolution
528
+ Resolution provided by parsed string.
529
+ parsed : datetime
530
+ Datetime from parsed string.
531
+
532
+ Returns
533
+ -------
534
+ lower, upper: pd.Timestamp
535
+ """
536
+ freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)
537
+ per = Period(parsed, freq=freq)
538
+ start, end = per.start_time, per.end_time
539
+
540
+ # GH 24076
541
+ # If an incoming date string contained a UTC offset, need to localize
542
+ # the parsed date to this offset first before aligning with the index's
543
+ # timezone
544
+ start = start.tz_localize(parsed.tzinfo)
545
+ end = end.tz_localize(parsed.tzinfo)
546
+
547
+ if parsed.tzinfo is not None:
548
+ if self.tz is None:
549
+ raise ValueError(
550
+ "The index must be timezone aware when indexing "
551
+ "with a date string with a UTC offset"
552
+ )
553
+ # The flipped case with parsed.tz is None and self.tz is not None
554
+ # is ruled out bc parsed and reso are produced by _parse_with_reso,
555
+ # which localizes parsed.
556
+ return start, end
557
+
558
+ def _parse_with_reso(self, label: str):
559
+ parsed, reso = super()._parse_with_reso(label)
560
+
561
+ parsed = Timestamp(parsed)
562
+
563
+ if self.tz is not None and parsed.tzinfo is None:
564
+ # we special-case timezone-naive strings and timezone-aware
565
+ # DatetimeIndex
566
+ # https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081
567
+ parsed = parsed.tz_localize(self.tz)
568
+
569
+ return parsed, reso
570
+
571
+ def _disallow_mismatched_indexing(self, key) -> None:
572
+ """
573
+ Check for mismatched-tzawareness indexing and re-raise as KeyError.
574
+ """
575
+ # we get here with isinstance(key, self._data._recognized_scalars)
576
+ try:
577
+ # GH#36148
578
+ self._data._assert_tzawareness_compat(key)
579
+ except TypeError as err:
580
+ raise KeyError(key) from err
581
+
582
+ def get_loc(self, key):
583
+ """
584
+ Get integer location for requested label
585
+
586
+ Returns
587
+ -------
588
+ loc : int
589
+ """
590
+ self._check_indexing_error(key)
591
+
592
+ orig_key = key
593
+ if is_valid_na_for_dtype(key, self.dtype):
594
+ key = NaT
595
+
596
+ if isinstance(key, self._data._recognized_scalars):
597
+ # needed to localize naive datetimes
598
+ self._disallow_mismatched_indexing(key)
599
+ key = Timestamp(key)
600
+
601
+ elif isinstance(key, str):
602
+ try:
603
+ parsed, reso = self._parse_with_reso(key)
604
+ except (ValueError, pytz.NonExistentTimeError) as err:
605
+ raise KeyError(key) from err
606
+ self._disallow_mismatched_indexing(parsed)
607
+
608
+ if self._can_partial_date_slice(reso):
609
+ try:
610
+ return self._partial_date_slice(reso, parsed)
611
+ except KeyError as err:
612
+ raise KeyError(key) from err
613
+
614
+ key = parsed
615
+
616
+ elif isinstance(key, dt.timedelta):
617
+ # GH#20464
618
+ raise TypeError(
619
+ f"Cannot index {type(self).__name__} with {type(key).__name__}"
620
+ )
621
+
622
+ elif isinstance(key, dt.time):
623
+ return self.indexer_at_time(key)
624
+
625
+ else:
626
+ # unrecognized type
627
+ raise KeyError(key)
628
+
629
+ try:
630
+ return Index.get_loc(self, key)
631
+ except KeyError as err:
632
+ raise KeyError(orig_key) from err
633
+
634
+ @doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound)
635
+ def _maybe_cast_slice_bound(self, label, side: str):
636
+ # GH#42855 handle date here instead of get_slice_bound
637
+ if isinstance(label, dt.date) and not isinstance(label, dt.datetime):
638
+ # Pandas supports slicing with dates, treated as datetimes at midnight.
639
+ # https://github.com/pandas-dev/pandas/issues/31501
640
+ label = Timestamp(label).to_pydatetime()
641
+
642
+ label = super()._maybe_cast_slice_bound(label, side)
643
+ self._data._assert_tzawareness_compat(label)
644
+ return Timestamp(label)
645
+
646
+ def slice_indexer(self, start=None, end=None, step=None):
647
+ """
648
+ Return indexer for specified label slice.
649
+ Index.slice_indexer, customized to handle time slicing.
650
+
651
+ In addition to functionality provided by Index.slice_indexer, does the
652
+ following:
653
+
654
+ - if both `start` and `end` are instances of `datetime.time`, it
655
+ invokes `indexer_between_time`
656
+ - if `start` and `end` are both either string or None perform
657
+ value-based selection in non-monotonic cases.
658
+
659
+ """
660
+ # For historical reasons DatetimeIndex supports slices between two
661
+ # instances of datetime.time as if it were applying a slice mask to
662
+ # an array of (self.hour, self.minute, self.seconds, self.microsecond).
663
+ if isinstance(start, dt.time) and isinstance(end, dt.time):
664
+ if step is not None and step != 1:
665
+ raise ValueError("Must have step size of 1 with time slices")
666
+ return self.indexer_between_time(start, end)
667
+
668
+ if isinstance(start, dt.time) or isinstance(end, dt.time):
669
+ raise KeyError("Cannot mix time and non-time slice keys")
670
+
671
+ def check_str_or_none(point) -> bool:
672
+ return point is not None and not isinstance(point, str)
673
+
674
+ # GH#33146 if start and end are combinations of str and None and Index is not
675
+ # monotonic, we can not use Index.slice_indexer because it does not honor the
676
+ # actual elements, is only searching for start and end
677
+ if (
678
+ check_str_or_none(start)
679
+ or check_str_or_none(end)
680
+ or self.is_monotonic_increasing
681
+ ):
682
+ return Index.slice_indexer(self, start, end, step)
683
+
684
+ mask = np.array(True)
685
+ in_index = True
686
+ if start is not None:
687
+ start_casted = self._maybe_cast_slice_bound(start, "left")
688
+ mask = start_casted <= self
689
+ in_index &= (start_casted == self).any()
690
+
691
+ if end is not None:
692
+ end_casted = self._maybe_cast_slice_bound(end, "right")
693
+ mask = (self <= end_casted) & mask
694
+ in_index &= (end_casted == self).any()
695
+
696
+ if not in_index:
697
+ raise KeyError(
698
+ "Value based partial slicing on non-monotonic DatetimeIndexes "
699
+ "with non-existing keys is not allowed.",
700
+ )
701
+ indexer = mask.nonzero()[0][::step]
702
+ if len(indexer) == len(self):
703
+ return slice(None)
704
+ else:
705
+ return indexer
706
+
707
+ # --------------------------------------------------------------------
708
+
709
+ @property
710
+ def inferred_type(self) -> str:
711
+ # b/c datetime is represented as microseconds since the epoch, make
712
+ # sure we can't have ambiguous indexing
713
+ return "datetime64"
714
+
715
+ def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:
716
+ """
717
+ Return index locations of values at particular time of day.
718
+
719
+ Parameters
720
+ ----------
721
+ time : datetime.time or str
722
+ Time passed in either as object (datetime.time) or as string in
723
+ appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
724
+ "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
725
+
726
+ Returns
727
+ -------
728
+ np.ndarray[np.intp]
729
+
730
+ See Also
731
+ --------
732
+ indexer_between_time : Get index locations of values between particular
733
+ times of day.
734
+ DataFrame.at_time : Select values at particular time of day.
735
+
736
+ Examples
737
+ --------
738
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00", "2/1/2020 11:00",
739
+ ... "3/1/2020 10:00"])
740
+ >>> idx.indexer_at_time("10:00")
741
+ array([0, 2])
742
+ """
743
+ if asof:
744
+ raise NotImplementedError("'asof' argument is not supported")
745
+
746
+ if isinstance(time, str):
747
+ from dateutil.parser import parse
748
+
749
+ time = parse(time).time()
750
+
751
+ if time.tzinfo:
752
+ if self.tz is None:
753
+ raise ValueError("Index must be timezone aware.")
754
+ time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
755
+ else:
756
+ time_micros = self._get_time_micros()
757
+ micros = _time_to_micros(time)
758
+ return (time_micros == micros).nonzero()[0]
759
+
760
+ def indexer_between_time(
761
+ self, start_time, end_time, include_start: bool = True, include_end: bool = True
762
+ ) -> npt.NDArray[np.intp]:
763
+ """
764
+ Return index locations of values between particular times of day.
765
+
766
+ Parameters
767
+ ----------
768
+ start_time, end_time : datetime.time, str
769
+ Time passed either as object (datetime.time) or as string in
770
+ appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
771
+ "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
772
+ include_start : bool, default True
773
+ include_end : bool, default True
774
+
775
+ Returns
776
+ -------
777
+ np.ndarray[np.intp]
778
+
779
+ See Also
780
+ --------
781
+ indexer_at_time : Get index locations of values at particular time of day.
782
+ DataFrame.between_time : Select values between particular times of day.
783
+
784
+ Examples
785
+ --------
786
+ >>> idx = pd.date_range("2023-01-01", periods=4, freq="h")
787
+ >>> idx
788
+ DatetimeIndex(['2023-01-01 00:00:00', '2023-01-01 01:00:00',
789
+ '2023-01-01 02:00:00', '2023-01-01 03:00:00'],
790
+ dtype='datetime64[ns]', freq='h')
791
+ >>> idx.indexer_between_time("00:00", "2:00", include_end=False)
792
+ array([0, 1])
793
+ """
794
+ start_time = to_time(start_time)
795
+ end_time = to_time(end_time)
796
+ time_micros = self._get_time_micros()
797
+ start_micros = _time_to_micros(start_time)
798
+ end_micros = _time_to_micros(end_time)
799
+
800
+ if include_start and include_end:
801
+ lop = rop = operator.le
802
+ elif include_start:
803
+ lop = operator.le
804
+ rop = operator.lt
805
+ elif include_end:
806
+ lop = operator.lt
807
+ rop = operator.le
808
+ else:
809
+ lop = rop = operator.lt
810
+
811
+ if start_time <= end_time:
812
+ join_op = operator.and_
813
+ else:
814
+ join_op = operator.or_
815
+
816
+ mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
817
+
818
+ return mask.nonzero()[0]
819
+
820
+
821
+ def date_range(
822
+ start=None,
823
+ end=None,
824
+ periods=None,
825
+ freq=None,
826
+ tz=None,
827
+ normalize: bool = False,
828
+ name: Hashable | None = None,
829
+ inclusive: IntervalClosedType = "both",
830
+ *,
831
+ unit: str | None = None,
832
+ **kwargs,
833
+ ) -> DatetimeIndex:
834
+ """
835
+ Return a fixed frequency DatetimeIndex.
836
+
837
+ Returns the range of equally spaced time points (where the difference between any
838
+ two adjacent points is specified by the given frequency) such that they all
839
+ satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp.,
840
+ the first and last time points in that range that fall on the boundary of ``freq``
841
+ (if given as a frequency string) or that are valid for ``freq`` (if given as a
842
+ :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``,
843
+ ``end``, or ``freq`` is *not* specified, this missing parameter can be computed
844
+ given ``periods``, the number of timesteps in the range. See the note below.)
845
+
846
+ Parameters
847
+ ----------
848
+ start : str or datetime-like, optional
849
+ Left bound for generating dates.
850
+ end : str or datetime-like, optional
851
+ Right bound for generating dates.
852
+ periods : int, optional
853
+ Number of periods to generate.
854
+ freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
855
+ Frequency strings can have multiples, e.g. '5h'. See
856
+ :ref:`here <timeseries.offset_aliases>` for a list of
857
+ frequency aliases.
858
+ tz : str or tzinfo, optional
859
+ Time zone name for returning localized DatetimeIndex, for example
860
+ 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
861
+ timezone-naive unless timezone-aware datetime-likes are passed.
862
+ normalize : bool, default False
863
+ Normalize start/end dates to midnight before generating date range.
864
+ name : str, default None
865
+ Name of the resulting DatetimeIndex.
866
+ inclusive : {"both", "neither", "left", "right"}, default "both"
867
+ Include boundaries; Whether to set each bound as closed or open.
868
+
869
+ .. versionadded:: 1.4.0
870
+ unit : str, default None
871
+ Specify the desired resolution of the result.
872
+
873
+ .. versionadded:: 2.0.0
874
+ **kwargs
875
+ For compatibility. Has no effect on the result.
876
+
877
+ Returns
878
+ -------
879
+ DatetimeIndex
880
+
881
+ See Also
882
+ --------
883
+ DatetimeIndex : An immutable container for datetimes.
884
+ timedelta_range : Return a fixed frequency TimedeltaIndex.
885
+ period_range : Return a fixed frequency PeriodIndex.
886
+ interval_range : Return a fixed frequency IntervalIndex.
887
+
888
+ Notes
889
+ -----
890
+ Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
891
+ exactly three must be specified. If ``freq`` is omitted, the resulting
892
+ ``DatetimeIndex`` will have ``periods`` linearly spaced elements between
893
+ ``start`` and ``end`` (closed on both sides).
894
+
895
+ To learn more about the frequency strings, please see `this link
896
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
897
+
898
+ Examples
899
+ --------
900
+ **Specifying the values**
901
+
902
+ The next four examples generate the same `DatetimeIndex`, but vary
903
+ the combination of `start`, `end` and `periods`.
904
+
905
+ Specify `start` and `end`, with the default daily frequency.
906
+
907
+ >>> pd.date_range(start='1/1/2018', end='1/08/2018')
908
+ DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
909
+ '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
910
+ dtype='datetime64[ns]', freq='D')
911
+
912
+ Specify timezone-aware `start` and `end`, with the default daily frequency.
913
+
914
+ >>> pd.date_range(
915
+ ... start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"),
916
+ ... end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"),
917
+ ... )
918
+ DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00',
919
+ '2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00',
920
+ '2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00',
921
+ '2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'],
922
+ dtype='datetime64[ns, Europe/Berlin]', freq='D')
923
+
924
+ Specify `start` and `periods`, the number of periods (days).
925
+
926
+ >>> pd.date_range(start='1/1/2018', periods=8)
927
+ DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
928
+ '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
929
+ dtype='datetime64[ns]', freq='D')
930
+
931
+ Specify `end` and `periods`, the number of periods (days).
932
+
933
+ >>> pd.date_range(end='1/1/2018', periods=8)
934
+ DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
935
+ '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
936
+ dtype='datetime64[ns]', freq='D')
937
+
938
+ Specify `start`, `end`, and `periods`; the frequency is generated
939
+ automatically (linearly spaced).
940
+
941
+ >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
942
+ DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
943
+ '2018-04-27 00:00:00'],
944
+ dtype='datetime64[ns]', freq=None)
945
+
946
+ **Other Parameters**
947
+
948
+ Changed the `freq` (frequency) to ``'ME'`` (month end frequency).
949
+
950
+ >>> pd.date_range(start='1/1/2018', periods=5, freq='ME')
951
+ DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
952
+ '2018-05-31'],
953
+ dtype='datetime64[ns]', freq='ME')
954
+
955
+ Multiples are allowed
956
+
957
+ >>> pd.date_range(start='1/1/2018', periods=5, freq='3ME')
958
+ DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
959
+ '2019-01-31'],
960
+ dtype='datetime64[ns]', freq='3ME')
961
+
962
+ `freq` can also be specified as an Offset object.
963
+
964
+ >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
965
+ DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
966
+ '2019-01-31'],
967
+ dtype='datetime64[ns]', freq='3ME')
968
+
969
+ Specify `tz` to set the timezone.
970
+
971
+ >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
972
+ DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
973
+ '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
974
+ '2018-01-05 00:00:00+09:00'],
975
+ dtype='datetime64[ns, Asia/Tokyo]', freq='D')
976
+
977
+ `inclusive` controls whether to include `start` and `end` that are on the
978
+ boundary. The default, "both", includes boundary points on either end.
979
+
980
+ >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both")
981
+ DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
982
+ dtype='datetime64[ns]', freq='D')
983
+
984
+ Use ``inclusive='left'`` to exclude `end` if it falls on the boundary.
985
+
986
+ >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left')
987
+ DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
988
+ dtype='datetime64[ns]', freq='D')
989
+
990
+ Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and
991
+ similarly ``inclusive='neither'`` will exclude both `start` and `end`.
992
+
993
+ >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right')
994
+ DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
995
+ dtype='datetime64[ns]', freq='D')
996
+
997
+ **Specify a unit**
998
+
999
+ >>> pd.date_range(start="2017-01-01", periods=10, freq="100YS", unit="s")
1000
+ DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01',
1001
+ '2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01',
1002
+ '2817-01-01', '2917-01-01'],
1003
+ dtype='datetime64[s]', freq='100YS-JAN')
1004
+ """
1005
+ if freq is None and com.any_none(periods, start, end):
1006
+ freq = "D"
1007
+
1008
+ dtarr = DatetimeArray._generate_range(
1009
+ start=start,
1010
+ end=end,
1011
+ periods=periods,
1012
+ freq=freq,
1013
+ tz=tz,
1014
+ normalize=normalize,
1015
+ inclusive=inclusive,
1016
+ unit=unit,
1017
+ **kwargs,
1018
+ )
1019
+ return DatetimeIndex._simple_new(dtarr, name=name)
1020
+
1021
+
1022
+ def bdate_range(
1023
+ start=None,
1024
+ end=None,
1025
+ periods: int | None = None,
1026
+ freq: Frequency | dt.timedelta = "B",
1027
+ tz=None,
1028
+ normalize: bool = True,
1029
+ name: Hashable | None = None,
1030
+ weekmask=None,
1031
+ holidays=None,
1032
+ inclusive: IntervalClosedType = "both",
1033
+ **kwargs,
1034
+ ) -> DatetimeIndex:
1035
+ """
1036
+ Return a fixed frequency DatetimeIndex with business day as the default.
1037
+
1038
+ Parameters
1039
+ ----------
1040
+ start : str or datetime-like, default None
1041
+ Left bound for generating dates.
1042
+ end : str or datetime-like, default None
1043
+ Right bound for generating dates.
1044
+ periods : int, default None
1045
+ Number of periods to generate.
1046
+ freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B'
1047
+ Frequency strings can have multiples, e.g. '5h'. The default is
1048
+ business daily ('B').
1049
+ tz : str or None
1050
+ Time zone name for returning localized DatetimeIndex, for example
1051
+ Asia/Beijing.
1052
+ normalize : bool, default False
1053
+ Normalize start/end dates to midnight before generating date range.
1054
+ name : str, default None
1055
+ Name of the resulting DatetimeIndex.
1056
+ weekmask : str or None, default None
1057
+ Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
1058
+ only used when custom frequency strings are passed. The default
1059
+ value None is equivalent to 'Mon Tue Wed Thu Fri'.
1060
+ holidays : list-like or None, default None
1061
+ Dates to exclude from the set of valid business days, passed to
1062
+ ``numpy.busdaycalendar``, only used when custom frequency strings
1063
+ are passed.
1064
+ inclusive : {"both", "neither", "left", "right"}, default "both"
1065
+ Include boundaries; Whether to set each bound as closed or open.
1066
+
1067
+ .. versionadded:: 1.4.0
1068
+ **kwargs
1069
+ For compatibility. Has no effect on the result.
1070
+
1071
+ Returns
1072
+ -------
1073
+ DatetimeIndex
1074
+
1075
+ Notes
1076
+ -----
1077
+ Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
1078
+ exactly three must be specified. Specifying ``freq`` is a requirement
1079
+ for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not
1080
+ desired.
1081
+
1082
+ To learn more about the frequency strings, please see `this link
1083
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
1084
+
1085
+ Examples
1086
+ --------
1087
+ Note how the two weekend days are skipped in the result.
1088
+
1089
+ >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
1090
+ DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
1091
+ '2018-01-05', '2018-01-08'],
1092
+ dtype='datetime64[ns]', freq='B')
1093
+ """
1094
+ if freq is None:
1095
+ msg = "freq must be specified for bdate_range; use date_range instead"
1096
+ raise TypeError(msg)
1097
+
1098
+ if isinstance(freq, str) and freq.startswith("C"):
1099
+ try:
1100
+ weekmask = weekmask or "Mon Tue Wed Thu Fri"
1101
+ freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
1102
+ except (KeyError, TypeError) as err:
1103
+ msg = f"invalid custom frequency string: {freq}"
1104
+ raise ValueError(msg) from err
1105
+ elif holidays or weekmask:
1106
+ msg = (
1107
+ "a custom frequency string is required when holidays or "
1108
+ f"weekmask are passed, got frequency {freq}"
1109
+ )
1110
+ raise ValueError(msg)
1111
+
1112
+ return date_range(
1113
+ start=start,
1114
+ end=end,
1115
+ periods=periods,
1116
+ freq=freq,
1117
+ tz=tz,
1118
+ normalize=normalize,
1119
+ name=name,
1120
+ inclusive=inclusive,
1121
+ **kwargs,
1122
+ )
1123
+
1124
+
1125
+ def _time_to_micros(time_obj: dt.time) -> int:
1126
+ seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
1127
+ return 1_000_000 * seconds + time_obj.microsecond
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/extension.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Shared methods for Index subclasses backed by ExtensionArray.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Callable,
9
+ TypeVar,
10
+ )
11
+
12
+ from pandas.util._decorators import cache_readonly
13
+
14
+ from pandas.core.dtypes.generic import ABCDataFrame
15
+
16
+ from pandas.core.indexes.base import Index
17
+
18
+ if TYPE_CHECKING:
19
+ import numpy as np
20
+
21
+ from pandas._typing import (
22
+ ArrayLike,
23
+ npt,
24
+ )
25
+
26
+ from pandas.core.arrays import IntervalArray
27
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
28
+
29
+ _ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex")
30
+
31
+
32
+ def _inherit_from_data(
33
+ name: str, delegate: type, cache: bool = False, wrap: bool = False
34
+ ):
35
+ """
36
+ Make an alias for a method of the underlying ExtensionArray.
37
+
38
+ Parameters
39
+ ----------
40
+ name : str
41
+ Name of an attribute the class should inherit from its EA parent.
42
+ delegate : class
43
+ cache : bool, default False
44
+ Whether to convert wrapped properties into cache_readonly
45
+ wrap : bool, default False
46
+ Whether to wrap the inherited result in an Index.
47
+
48
+ Returns
49
+ -------
50
+ attribute, method, property, or cache_readonly
51
+ """
52
+ attr = getattr(delegate, name)
53
+
54
+ if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":
55
+ # getset_descriptor i.e. property defined in cython class
56
+ if cache:
57
+
58
+ def cached(self):
59
+ return getattr(self._data, name)
60
+
61
+ cached.__name__ = name
62
+ cached.__doc__ = attr.__doc__
63
+ method = cache_readonly(cached)
64
+
65
+ else:
66
+
67
+ def fget(self):
68
+ result = getattr(self._data, name)
69
+ if wrap:
70
+ if isinstance(result, type(self._data)):
71
+ return type(self)._simple_new(result, name=self.name)
72
+ elif isinstance(result, ABCDataFrame):
73
+ return result.set_index(self)
74
+ return Index(result, name=self.name)
75
+ return result
76
+
77
+ def fset(self, value) -> None:
78
+ setattr(self._data, name, value)
79
+
80
+ fget.__name__ = name
81
+ fget.__doc__ = attr.__doc__
82
+
83
+ method = property(fget, fset)
84
+
85
+ elif not callable(attr):
86
+ # just a normal attribute, no wrapping
87
+ method = attr
88
+
89
+ else:
90
+ # error: Incompatible redefinition (redefinition with type "Callable[[Any,
91
+ # VarArg(Any), KwArg(Any)], Any]", original type "property")
92
+ def method(self, *args, **kwargs): # type: ignore[misc]
93
+ if "inplace" in kwargs:
94
+ raise ValueError(f"cannot use inplace with {type(self).__name__}")
95
+ result = attr(self._data, *args, **kwargs)
96
+ if wrap:
97
+ if isinstance(result, type(self._data)):
98
+ return type(self)._simple_new(result, name=self.name)
99
+ elif isinstance(result, ABCDataFrame):
100
+ return result.set_index(self)
101
+ return Index(result, name=self.name)
102
+ return result
103
+
104
+ # error: "property" has no attribute "__name__"
105
+ method.__name__ = name # type: ignore[attr-defined]
106
+ method.__doc__ = attr.__doc__
107
+ return method
108
+
109
+
110
def inherit_names(
    names: list[str], delegate: type, cache: bool = False, wrap: bool = False
) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]:
    """
    Class decorator to pin attributes from an ExtensionArray to a Index subclass.

    Parameters
    ----------
    names : List[str]
    delegate : class
    cache : bool, default False
    wrap : bool, default False
        Whether to wrap the inherited result in an Index.
    """

    def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]:
        # Pin one delegating attribute per requested name onto the class.
        for attr_name in names:
            setattr(
                cls,
                attr_name,
                _inherit_from_data(attr_name, delegate, cache=cache, wrap=wrap),
            )
        return cls

    return wrapper
133
+
134
+
135
class ExtensionIndex(Index):
    """
    Shared base class for Index subclasses whose data live in an ExtensionArray.
    """

    # size, __len__ and dtype already pass through to _data via the base class.

    _data: IntervalArray | NDArrayBackedExtensionArray

    # ---------------------------------------------------------------------

    def _validate_fill_value(self, value):
        """
        Coerce ``value`` into something insertable into the underlying array.
        """
        # The EA's setitem validation implements exactly the coercion needed.
        return self._data._validate_setitem_value(value)

    @cache_readonly
    def _isnan(self) -> npt.NDArray[np.bool_]:
        """Boolean mask of missing values, computed once and cached."""
        # error: Incompatible return value type (got "ExtensionArray", expected
        # "ndarray")
        return self._data.isna()  # type: ignore[return-value]
158
+
159
+
160
class NDArrayBackedExtensionIndex(ExtensionIndex):
    """
    Index subclass for indexes backed by NDArrayBackedExtensionArray.
    """

    _data: NDArrayBackedExtensionArray

    def _get_engine_target(self) -> np.ndarray:
        """Expose the raw ndarray that the indexing engine should operate on."""
        return self._data._ndarray

    def _from_join_target(self, result: np.ndarray) -> ArrayLike:
        """Re-wrap a join-result ndarray in the backing extension-array type."""
        # The engine must hand back the same physical dtype it was given.
        assert result.dtype == self._data._ndarray.dtype
        return self._data._from_backing_data(result)
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/frozen.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ frozen (immutable) data structures to support MultiIndexing
3
+
4
+ These are used for:
5
+
6
+ - .names (FrozenList)
7
+
8
+ """
9
+ from __future__ import annotations
10
+
11
+ from typing import (
12
+ TYPE_CHECKING,
13
+ NoReturn,
14
+ )
15
+
16
+ from pandas.core.base import PandasObject
17
+
18
+ from pandas.io.formats.printing import pprint_thing
19
+
20
+ if TYPE_CHECKING:
21
+ from pandas._typing import Self
22
+
23
+
24
class FrozenList(PandasObject, list):
    """
    Immutable ``list`` subclass.

    Mutating operations raise ``TypeError``; because the contents can never
    change, instances are hashable and therefore usable for lookups
    (e.g. as ``MultiIndex.names``).
    """

    # Side note: This has to be of type list. Otherwise,
    # it messes up PyTables type checks.

    def union(self, other) -> FrozenList:
        """
        Return a new FrozenList with ``other`` concatenated to the end of self.

        Parameters
        ----------
        other : array-like
            The array-like whose elements we are concatenating.

        Returns
        -------
        FrozenList
            The collection difference between self and other.
        """
        values = list(other) if isinstance(other, tuple) else other
        return type(self)(super().__add__(values))

    def difference(self, other) -> FrozenList:
        """
        Return a new FrozenList with the elements of ``other`` removed.

        Parameters
        ----------
        other : array-like
            The array-like whose elements we are removing self.

        Returns
        -------
        FrozenList
            The collection difference between self and other.
        """
        excluded = set(other)
        return type(self)([item for item in self if item not in excluded])

    # TODO: Consider deprecating these in favor of `union` (xref gh-15506)
    # error: Incompatible types in assignment (expression has type
    # "Callable[[FrozenList, Any], FrozenList]", base class "list" defined the
    # type as overloaded function)
    __add__ = __iadd__ = union  # type: ignore[assignment]

    def __getitem__(self, key):
        # Slices must stay frozen; scalar access falls through to list.
        if isinstance(key, slice):
            return type(self)(super().__getitem__(key))
        return super().__getitem__(key)

    def __radd__(self, other) -> Self:
        prefix = list(other) if isinstance(other, tuple) else other
        return type(self)(prefix + list(self))

    def __eq__(self, other: object) -> bool:
        # Compare equal to tuples with the same contents, not just lists.
        if isinstance(other, (tuple, FrozenList)):
            other = list(other)
        return super().__eq__(other)

    __req__ = __eq__

    def __mul__(self, other) -> Self:
        return type(self)(super().__mul__(other))

    __imul__ = __mul__

    def __reduce__(self):
        # Pickle as (class, plain-list) so unpickling rebuilds via __init__.
        return type(self), (list(self),)

    # error: Signature of "__hash__" incompatible with supertype "list"
    def __hash__(self) -> int:  # type: ignore[override]
        return hash(tuple(self))

    def _disabled(self, *args, **kwargs) -> NoReturn:
        """
        This method will not function because object is immutable.
        """
        raise TypeError(f"'{type(self).__name__}' does not support mutable operations.")

    def __str__(self) -> str:
        return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n"))

    def __repr__(self) -> str:
        return f"{type(self).__name__}({str(self)})"

    __setitem__ = __setslice__ = _disabled  # type: ignore[assignment]
    __delitem__ = __delslice__ = _disabled
    pop = append = extend = _disabled
    remove = sort = insert = _disabled  # type: ignore[assignment]
vlmpy310/lib/python3.10/site-packages/pandas/core/indexes/interval.py ADDED
@@ -0,0 +1,1136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ define the IntervalIndex """
2
+ from __future__ import annotations
3
+
4
+ from operator import (
5
+ le,
6
+ lt,
7
+ )
8
+ import textwrap
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Literal,
13
+ )
14
+
15
+ import numpy as np
16
+
17
+ from pandas._libs import lib
18
+ from pandas._libs.interval import (
19
+ Interval,
20
+ IntervalMixin,
21
+ IntervalTree,
22
+ )
23
+ from pandas._libs.tslibs import (
24
+ BaseOffset,
25
+ Period,
26
+ Timedelta,
27
+ Timestamp,
28
+ to_offset,
29
+ )
30
+ from pandas.errors import InvalidIndexError
31
+ from pandas.util._decorators import (
32
+ Appender,
33
+ cache_readonly,
34
+ )
35
+ from pandas.util._exceptions import rewrite_exception
36
+
37
+ from pandas.core.dtypes.cast import (
38
+ find_common_type,
39
+ infer_dtype_from_scalar,
40
+ maybe_box_datetimelike,
41
+ maybe_downcast_numeric,
42
+ maybe_upcast_numeric_to_64bit,
43
+ )
44
+ from pandas.core.dtypes.common import (
45
+ ensure_platform_int,
46
+ is_float_dtype,
47
+ is_integer,
48
+ is_integer_dtype,
49
+ is_list_like,
50
+ is_number,
51
+ is_object_dtype,
52
+ is_scalar,
53
+ pandas_dtype,
54
+ )
55
+ from pandas.core.dtypes.dtypes import (
56
+ DatetimeTZDtype,
57
+ IntervalDtype,
58
+ )
59
+ from pandas.core.dtypes.missing import is_valid_na_for_dtype
60
+
61
+ from pandas.core.algorithms import unique
62
+ from pandas.core.arrays.datetimelike import validate_periods
63
+ from pandas.core.arrays.interval import (
64
+ IntervalArray,
65
+ _interval_shared_docs,
66
+ )
67
+ import pandas.core.common as com
68
+ from pandas.core.indexers import is_valid_positional_slice
69
+ import pandas.core.indexes.base as ibase
70
+ from pandas.core.indexes.base import (
71
+ Index,
72
+ _index_shared_docs,
73
+ ensure_index,
74
+ maybe_extract_name,
75
+ )
76
+ from pandas.core.indexes.datetimes import (
77
+ DatetimeIndex,
78
+ date_range,
79
+ )
80
+ from pandas.core.indexes.extension import (
81
+ ExtensionIndex,
82
+ inherit_names,
83
+ )
84
+ from pandas.core.indexes.multi import MultiIndex
85
+ from pandas.core.indexes.timedeltas import (
86
+ TimedeltaIndex,
87
+ timedelta_range,
88
+ )
89
+
90
+ if TYPE_CHECKING:
91
+ from collections.abc import Hashable
92
+
93
+ from pandas._typing import (
94
+ Dtype,
95
+ DtypeObj,
96
+ IntervalClosedType,
97
+ Self,
98
+ npt,
99
+ )
100
+ _index_doc_kwargs = dict(ibase._index_doc_kwargs)
101
+
102
+ _index_doc_kwargs.update(
103
+ {
104
+ "klass": "IntervalIndex",
105
+ "qualname": "IntervalIndex",
106
+ "target_klass": "IntervalIndex or list of Intervals",
107
+ "name": textwrap.dedent(
108
+ """\
109
+ name : object, optional
110
+ Name to be stored in the index.
111
+ """
112
+ ),
113
+ }
114
+ )
115
+
116
+
117
+ def _get_next_label(label):
118
+ # see test_slice_locs_with_ints_and_floats_succeeds
119
+ dtype = getattr(label, "dtype", type(label))
120
+ if isinstance(label, (Timestamp, Timedelta)):
121
+ dtype = "datetime64[ns]"
122
+ dtype = pandas_dtype(dtype)
123
+
124
+ if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
125
+ return label + np.timedelta64(1, "ns")
126
+ elif is_integer_dtype(dtype):
127
+ return label + 1
128
+ elif is_float_dtype(dtype):
129
+ return np.nextafter(label, np.inf)
130
+ else:
131
+ raise TypeError(f"cannot determine next label for type {repr(type(label))}")
132
+
133
+
134
+ def _get_prev_label(label):
135
+ # see test_slice_locs_with_ints_and_floats_succeeds
136
+ dtype = getattr(label, "dtype", type(label))
137
+ if isinstance(label, (Timestamp, Timedelta)):
138
+ dtype = "datetime64[ns]"
139
+ dtype = pandas_dtype(dtype)
140
+
141
+ if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
142
+ return label - np.timedelta64(1, "ns")
143
+ elif is_integer_dtype(dtype):
144
+ return label - 1
145
+ elif is_float_dtype(dtype):
146
+ return np.nextafter(label, -np.inf)
147
+ else:
148
+ raise TypeError(f"cannot determine next label for type {repr(type(label))}")
149
+
150
+
151
+ def _new_IntervalIndex(cls, d):
152
+ """
153
+ This is called upon unpickling, rather than the default which doesn't have
154
+ arguments and breaks __new__.
155
+ """
156
+ return cls.from_arrays(**d)
157
+
158
+
159
+ @Appender(
160
+ _interval_shared_docs["class"]
161
+ % {
162
+ "klass": "IntervalIndex",
163
+ "summary": "Immutable index of intervals that are closed on the same side.",
164
+ "name": _index_doc_kwargs["name"],
165
+ "extra_attributes": "is_overlapping\nvalues\n",
166
+ "extra_methods": "",
167
+ "examples": textwrap.dedent(
168
+ """\
169
+ Examples
170
+ --------
171
+ A new ``IntervalIndex`` is typically constructed using
172
+ :func:`interval_range`:
173
+
174
+ >>> pd.interval_range(start=0, end=5)
175
+ IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
176
+ dtype='interval[int64, right]')
177
+
178
+ It may also be constructed using one of the constructor
179
+ methods: :meth:`IntervalIndex.from_arrays`,
180
+ :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
181
+
182
+ See further examples in the doc strings of ``interval_range`` and the
183
+ mentioned constructor methods.
184
+ """
185
+ ),
186
+ }
187
+ )
188
+ @inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
189
+ @inherit_names(
190
+ [
191
+ "__array__",
192
+ "overlaps",
193
+ "contains",
194
+ "closed_left",
195
+ "closed_right",
196
+ "open_left",
197
+ "open_right",
198
+ "is_empty",
199
+ ],
200
+ IntervalArray,
201
+ )
202
+ @inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True)
203
+ class IntervalIndex(ExtensionIndex):
204
+ _typ = "intervalindex"
205
+
206
+ # annotate properties pinned via inherit_names
207
+ closed: IntervalClosedType
208
+ is_non_overlapping_monotonic: bool
209
+ closed_left: bool
210
+ closed_right: bool
211
+ open_left: bool
212
+ open_right: bool
213
+
214
+ _data: IntervalArray
215
+ _values: IntervalArray
216
+ _can_hold_strings = False
217
+ _data_cls = IntervalArray
218
+
219
+ # --------------------------------------------------------------------
220
+ # Constructors
221
+
222
    def __new__(
        cls,
        data,
        closed: IntervalClosedType | None = None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable | None = None,
        verify_integrity: bool = True,
    ) -> Self:
        """
        Create an IntervalIndex from interval-like data.

        All construction arguments are passed through to the IntervalArray
        constructor; ``name`` may alternatively be inferred from ``data``.
        """
        name = maybe_extract_name(name, data, cls)

        # Build the backing IntervalArray; rewrite_exception relabels any
        # raised message so it names "IntervalIndex" rather than the array.
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray(
                data,
                closed=closed,
                copy=copy,
                dtype=dtype,
                verify_integrity=verify_integrity,
            )

        return cls._simple_new(array, name)
243
+
244
    @classmethod
    @Appender(
        _interval_shared_docs["from_breaks"]
        % {
            "klass": "IntervalIndex",
            "name": textwrap.dedent(
                """
             name : str, optional
                  Name of the resulting IntervalIndex."""
            ),
            "examples": textwrap.dedent(
                """\
        Examples
        --------
        >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
        IntervalIndex([(0, 1], (1, 2], (2, 3]],
                      dtype='interval[int64, right]')
        """
            ),
        }
    )
    def from_breaks(
        cls,
        breaks,
        closed: IntervalClosedType | None = "right",
        name: Hashable | None = None,
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> IntervalIndex:
        # Alternate constructor: adjacent intervals built from split points.
        # Delegates to IntervalArray.from_breaks; rewrite_exception relabels
        # error messages to name "IntervalIndex".
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_breaks(
                breaks, closed=closed, copy=copy, dtype=dtype
            )
        return cls._simple_new(array, name=name)
278
+
279
    @classmethod
    @Appender(
        _interval_shared_docs["from_arrays"]
        % {
            "klass": "IntervalIndex",
            "name": textwrap.dedent(
                """
             name : str, optional
                  Name of the resulting IntervalIndex."""
            ),
            "examples": textwrap.dedent(
                """\
        Examples
        --------
        >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
        IntervalIndex([(0, 1], (1, 2], (2, 3]],
                      dtype='interval[int64, right]')
        """
            ),
        }
    )
    def from_arrays(
        cls,
        left,
        right,
        closed: IntervalClosedType = "right",
        name: Hashable | None = None,
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> IntervalIndex:
        # Alternate constructor: intervals from parallel left/right endpoint
        # arrays. Delegates to IntervalArray.from_arrays.
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_arrays(
                left, right, closed, copy=copy, dtype=dtype
            )
        return cls._simple_new(array, name=name)
314
+
315
    @classmethod
    @Appender(
        _interval_shared_docs["from_tuples"]
        % {
            "klass": "IntervalIndex",
            "name": textwrap.dedent(
                """
             name : str, optional
                  Name of the resulting IntervalIndex."""
            ),
            "examples": textwrap.dedent(
                """\
        Examples
        --------
        >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
        IntervalIndex([(0, 1], (1, 2]],
                      dtype='interval[int64, right]')
        """
            ),
        }
    )
    def from_tuples(
        cls,
        data,
        closed: IntervalClosedType = "right",
        name: Hashable | None = None,
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> IntervalIndex:
        # Alternate constructor: intervals from (left, right) tuples.
        # Delegates to IntervalArray.from_tuples.
        with rewrite_exception("IntervalArray", cls.__name__):
            arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
        return cls._simple_new(arr, name=name)
347
+
348
+ # --------------------------------------------------------------------
349
    # error: Return type "IntervalTree" of "_engine" incompatible with return type
    # "Union[IndexEngine, ExtensionEngine]" in supertype "Index"
    @cache_readonly
    def _engine(self) -> IntervalTree:  # type: ignore[override]
        # IntervalTree does not supports numpy array unless they are 64 bit
        # -> convert datetime-like endpoints to i8 and upcast narrower
        # numeric dtypes before handing them to the tree.
        left = self._maybe_convert_i8(self.left)
        left = maybe_upcast_numeric_to_64bit(left)
        right = self._maybe_convert_i8(self.right)
        right = maybe_upcast_numeric_to_64bit(right)
        return IntervalTree(left, right, closed=self.closed)
359
+
360
    def __contains__(self, key: Any) -> bool:
        """
        return a boolean if this key is IN the index
        We *only* accept an Interval

        Parameters
        ----------
        key : Interval

        Returns
        -------
        bool
        """
        hash(key)  # raise TypeError for unhashable keys, like dict/set do
        if not isinstance(key, Interval):
            # NA-likes count as contained iff the index actually holds NAs.
            if is_valid_na_for_dtype(key, self.dtype):
                return self.hasnans
            return False

        # Membership for Interval keys means an exact get_loc match.
        try:
            self.get_loc(key)
            return True
        except KeyError:
            return False
384
+
385
    def _getitem_slice(self, slobj: slice) -> IntervalIndex:
        """
        Fastpath for __getitem__ when we know we have a slice.
        """
        # Slice the backing IntervalArray directly and re-wrap it, skipping
        # the generic Index.__getitem__ machinery.
        res = self._data[slobj]
        return type(self)._simple_new(res, name=self._name)
391
+
392
    @cache_readonly
    def _multiindex(self) -> MultiIndex:
        # Cached two-level ("left", "right") view of the interval endpoints.
        return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
395
+
396
    def __reduce__(self):
        # Pickle via _new_IntervalIndex / from_arrays, because __new__
        # requires arguments and cannot be called bare on unpickling.
        d = {
            "left": self.left,
            "right": self.right,
            "closed": self.closed,
            "name": self.name,
        }
        return _new_IntervalIndex, (type(self), d), None
404
+
405
    @property
    def inferred_type(self) -> str:
        """Return a string of the type inferred from the values"""
        # Always "interval" -- the dtype fully determines the inference.
        return "interval"
409
+
410
    # Cannot determine type of "memory_usage"
    @Appender(Index.memory_usage.__doc__)  # type: ignore[has-type]
    def memory_usage(self, deep: bool = False) -> int:
        # we don't use an explicit engine
        # so return the bytes here
        # (the footprint is the sum of the two endpoint indexes)
        return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
416
+
417
    # IntervalTree doesn't have a is_monotonic_decreasing, so have to override
    # the Index implementation
    @cache_readonly
    def is_monotonic_decreasing(self) -> bool:
        """
        Return True if the IntervalIndex is monotonic decreasing (only equal or
        decreasing values), else False
        """
        # Reversing the index turns the question into "monotonic increasing",
        # which the backing machinery can answer.
        return self[::-1].is_monotonic_increasing
426
+
427
    @cache_readonly
    def is_unique(self) -> bool:
        """
        Return True if the IntervalIndex contains unique elements, else False.
        """
        left = self.left
        right = self.right

        # More than one missing interval -> duplicates by definition.
        if self.isna().sum() > 1:
            return False

        # If either endpoint array is unique, every (left, right) pair is too.
        if left.is_unique or right.is_unique:
            return True

        # Otherwise only positions whose left endpoint repeats can possibly
        # collide; check those pairwise.
        seen_pairs = set()
        check_idx = np.where(left.duplicated(keep=False))[0]
        for idx in check_idx:
            pair = (left[idx], right[idx])
            if pair in seen_pairs:
                return False
            seen_pairs.add(pair)

        return True
450
+
451
    @property
    def is_overlapping(self) -> bool:
        """
        Return True if the IntervalIndex has overlapping intervals, else False.

        Two intervals overlap if they share a common point, including closed
        endpoints. Intervals that only have an open endpoint in common do not
        overlap.

        Returns
        -------
        bool
            Boolean indicating if the IntervalIndex has overlapping intervals.

        See Also
        --------
        Interval.overlaps : Check whether two Interval objects overlap.
        IntervalIndex.overlaps : Check an IntervalIndex elementwise for
            overlaps.

        Examples
        --------
        >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
        >>> index
        IntervalIndex([(0, 2], (1, 3], (4, 5]],
              dtype='interval[int64, right]')
        >>> index.is_overlapping
        True

        Intervals that share closed endpoints overlap:

        >>> index = pd.interval_range(0, 3, closed='both')
        >>> index
        IntervalIndex([[0, 1], [1, 2], [2, 3]],
              dtype='interval[int64, both]')
        >>> index.is_overlapping
        True

        Intervals that only have an open endpoint in common do not overlap:

        >>> index = pd.interval_range(0, 3, closed='left')
        >>> index
        IntervalIndex([[0, 1), [1, 2), [2, 3)],
              dtype='interval[int64, left]')
        >>> index.is_overlapping
        False
        """
        # GH 23309
        # Delegate to the IntervalTree engine, which tracks this property.
        return self._engine.is_overlapping
500
+
501
    def _needs_i8_conversion(self, key) -> bool:
        """
        Check if a given key needs i8 conversion. Conversion is necessary for
        Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
        Interval-like requires conversion if its endpoints are one of the
        aforementioned types.

        Assumes that any list-like data has already been cast to an Index.

        Parameters
        ----------
        key : scalar or Index-like
            The key that should be checked for i8 conversion

        Returns
        -------
        bool
        """
        key_dtype = getattr(key, "dtype", None)
        if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
            # Interval-like: recurse on an endpoint, since both endpoints
            # share a dtype.
            return self._needs_i8_conversion(key.left)

        i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
        return isinstance(key, i8_types)
525
+
526
    def _maybe_convert_i8(self, key):
        """
        Maybe convert a given key to its equivalent i8 value(s). Used as a
        preprocessing step prior to IntervalTree queries (self._engine), which
        expects numeric data.

        Parameters
        ----------
        key : scalar or list-like
            The key that should maybe be converted to i8.

        Returns
        -------
        scalar or list-like
            The original key if no conversion occurred, int if converted scalar,
            Index with an int64 dtype if converted list-like.

        Raises
        ------
        ValueError
            If the key's dtype does not match this index's subtype.
        """
        if is_list_like(key):
            key = ensure_index(key)
            key = maybe_upcast_numeric_to_64bit(key)

        if not self._needs_i8_conversion(key):
            return key

        scalar = is_scalar(key)
        key_dtype = getattr(key, "dtype", None)
        if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
            # convert left/right and reconstruct
            left = self._maybe_convert_i8(key.left)
            right = self._maybe_convert_i8(key.right)
            constructor = Interval if scalar else IntervalIndex.from_arrays
            # error: "object" not callable
            return constructor(
                left, right, closed=self.closed
            )  # type: ignore[operator]

        if scalar:
            # Timestamp/Timedelta
            key_dtype, key_i8 = infer_dtype_from_scalar(key)
            if isinstance(key, Period):
                key_i8 = key.ordinal
            elif isinstance(key_i8, Timestamp):
                key_i8 = key_i8._value
            elif isinstance(key_i8, (np.datetime64, np.timedelta64)):
                key_i8 = key_i8.view("i8")
        else:
            # DatetimeIndex/TimedeltaIndex
            key_dtype, key_i8 = key.dtype, Index(key.asi8)
            if key.hasnans:
                # convert NaT from its i8 value to np.nan so it's not viewed
                # as a valid value, maybe causing errors (e.g. is_overlapping)
                key_i8 = key_i8.where(~key._isnan)

        # ensure consistency with IntervalIndex subtype
        # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
        # ExtensionDtype]" has no attribute "subtype"
        subtype = self.dtype.subtype  # type: ignore[union-attr]

        if subtype != key_dtype:
            raise ValueError(
                f"Cannot index an IntervalIndex of subtype {subtype} with "
                f"values of dtype {key_dtype}"
            )

        return key_i8
591
+
592
    def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
        """
        Locate an insertion point for ``label``.

        Only valid when the intervals are non-overlapping and monotonic;
        otherwise a KeyError is raised.
        """
        if not self.is_non_overlapping_monotonic:
            raise KeyError(
                "can only get slices from an IntervalIndex if bounds are "
                "non-overlapping and all monotonic increasing or decreasing"
            )

        if isinstance(label, (IntervalMixin, IntervalIndex)):
            raise NotImplementedError("Interval objects are not currently supported")

        # GH 20921: "not is_monotonic_increasing" for the second condition
        # instead of "is_monotonic_decreasing" to account for single element
        # indexes being both increasing and decreasing
        if (side == "left" and self.left.is_monotonic_increasing) or (
            side == "right" and not self.left.is_monotonic_increasing
        ):
            sub_idx = self.right
            if self.open_right:
                # open boundary: step the label to the next representable
                # value so the endpoint itself is excluded
                label = _get_next_label(label)
        else:
            sub_idx = self.left
            if self.open_left:
                label = _get_prev_label(label)

        return sub_idx._searchsorted_monotonic(label, side)
617
+
618
+ # --------------------------------------------------------------------
619
+ # Indexing Methods
620
+
621
    def get_loc(self, key) -> int | slice | np.ndarray:
        """
        Get integer location, slice or boolean mask for requested label.

        Parameters
        ----------
        key : label

        Returns
        -------
        int if unique index, slice if monotonic index, else mask

        Raises
        ------
        KeyError
            If no interval matches ``key``, or an Interval key has a
            different ``closed`` than this index.

        Examples
        --------
        >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
        >>> index = pd.IntervalIndex([i1, i2])
        >>> index.get_loc(1)
        0

        You can also supply a point inside an interval.

        >>> index.get_loc(1.5)
        1

        If a label is in several intervals, you get the locations of all the
        relevant intervals.

        >>> i3 = pd.Interval(0, 2)
        >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
        >>> overlapping_index.get_loc(0.5)
        array([ True, False,  True])

        Only exact matches will be returned if an interval is provided.

        >>> index.get_loc(pd.Interval(0, 1))
        0
        """
        self._check_indexing_error(key)

        if isinstance(key, Interval):
            # Interval keys match only exactly (same endpoints, same closed).
            if self.closed != key.closed:
                raise KeyError(key)
            mask = (self.left == key.left) & (self.right == key.right)
        elif is_valid_na_for_dtype(key, self.dtype):
            mask = self.isna()
        else:
            # assume scalar
            # A scalar matches every interval containing it; the comparison
            # operators depend on which sides are closed.
            op_left = le if self.closed_left else lt
            op_right = le if self.closed_right else lt
            try:
                mask = op_left(self.left, key) & op_right(key, self.right)
            except TypeError as err:
                # scalar is not comparable to II subtype --> invalid label
                raise KeyError(key) from err

        matches = mask.sum()  # number of intervals that matched
        if matches == 0:
            raise KeyError(key)
        if matches == 1:
            return mask.argmax()

        # Multiple matches: return a slice when they are contiguous,
        # otherwise the boolean mask itself.
        res = lib.maybe_booleans_to_slice(mask.view("u1"))
        if isinstance(res, slice) and res.stop is None:
            # TODO: DO this in maybe_booleans_to_slice?
            res = slice(res.start, len(self), res.step)
        return res
687
+
688
+ def _get_indexer(
689
+ self,
690
+ target: Index,
691
+ method: str | None = None,
692
+ limit: int | None = None,
693
+ tolerance: Any | None = None,
694
+ ) -> npt.NDArray[np.intp]:
695
+ if isinstance(target, IntervalIndex):
696
+ # We only get here with not self.is_overlapping
697
+ # -> at most one match per interval in target
698
+ # want exact matches -> need both left/right to match, so defer to
699
+ # left/right get_indexer, compare elementwise, equality -> match
700
+ indexer = self._get_indexer_unique_sides(target)
701
+
702
+ elif not is_object_dtype(target.dtype):
703
+ # homogeneous scalar index: use IntervalTree
704
+ # we should always have self._should_partial_index(target) here
705
+ target = self._maybe_convert_i8(target)
706
+ indexer = self._engine.get_indexer(target.values)
707
+ else:
708
+ # heterogeneous scalar index: defer elementwise to get_loc
709
+ # we should always have self._should_partial_index(target) here
710
+ return self._get_indexer_pointwise(target)[0]
711
+
712
+ return ensure_platform_int(indexer)
713
+
714
+ @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
715
+ def get_indexer_non_unique(
716
+ self, target: Index
717
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
718
+ target = ensure_index(target)
719
+
720
+ if not self._should_compare(target) and not self._should_partial_index(target):
721
+ # e.g. IntervalIndex with different closed or incompatible subtype
722
+ # -> no matches
723
+ return self._get_indexer_non_comparable(target, None, unique=False)
724
+
725
+ elif isinstance(target, IntervalIndex):
726
+ if self.left.is_unique and self.right.is_unique:
727
+ # fastpath available even if we don't have self._index_as_unique
728
+ indexer = self._get_indexer_unique_sides(target)
729
+ missing = (indexer == -1).nonzero()[0]
730
+ else:
731
+ return self._get_indexer_pointwise(target)
732
+
733
+ elif is_object_dtype(target.dtype) or not self._should_partial_index(target):
734
+ # target might contain intervals: defer elementwise to get_loc
735
+ return self._get_indexer_pointwise(target)
736
+
737
+ else:
738
+ # Note: this case behaves differently from other Index subclasses
739
+ # because IntervalIndex does partial-int indexing
740
+ target = self._maybe_convert_i8(target)
741
+ indexer, missing = self._engine.get_indexer_non_unique(target.values)
742
+
743
+ return ensure_platform_int(indexer), ensure_platform_int(missing)
744
+
745
+ def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]:
746
+ """
747
+ _get_indexer specialized to the case where both of our sides are unique.
748
+ """
749
+ # Caller is responsible for checking
750
+ # `self.left.is_unique and self.right.is_unique`
751
+
752
+ left_indexer = self.left.get_indexer(target.left)
753
+ right_indexer = self.right.get_indexer(target.right)
754
+ indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
755
+ return indexer
756
+
757
+ def _get_indexer_pointwise(
758
+ self, target: Index
759
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
760
+ """
761
+ pointwise implementation for get_indexer and get_indexer_non_unique.
762
+ """
763
+ indexer, missing = [], []
764
+ for i, key in enumerate(target):
765
+ try:
766
+ locs = self.get_loc(key)
767
+ if isinstance(locs, slice):
768
+ # Only needed for get_indexer_non_unique
769
+ locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
770
+ elif lib.is_integer(locs):
771
+ locs = np.array(locs, ndmin=1)
772
+ else:
773
+ # otherwise we have ndarray[bool]
774
+ locs = np.where(locs)[0]
775
+ except KeyError:
776
+ missing.append(i)
777
+ locs = np.array([-1])
778
+ except InvalidIndexError:
779
+ # i.e. non-scalar key e.g. a tuple.
780
+ # see test_append_different_columns_types_raises
781
+ missing.append(i)
782
+ locs = np.array([-1])
783
+
784
+ indexer.append(locs)
785
+
786
+ indexer = np.concatenate(indexer)
787
+ return ensure_platform_int(indexer), ensure_platform_int(missing)
788
+
789
+ @cache_readonly
790
+ def _index_as_unique(self) -> bool:
791
+ return not self.is_overlapping and self._engine._na_count < 2
792
+
793
+ _requires_unique_msg = (
794
+ "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique"
795
+ )
796
+
797
+ def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
798
+ if not (key.step is None or key.step == 1):
799
+ # GH#31658 if label-based, we require step == 1,
800
+ # if positional, we disallow float start/stop
801
+ msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
802
+ if kind == "loc":
803
+ raise ValueError(msg)
804
+ if kind == "getitem":
805
+ if not is_valid_positional_slice(key):
806
+ # i.e. this cannot be interpreted as a positional slice
807
+ raise ValueError(msg)
808
+
809
+ return super()._convert_slice_indexer(key, kind)
810
+
811
+ @cache_readonly
812
+ def _should_fallback_to_positional(self) -> bool:
813
+ # integer lookups in Series.__getitem__ are unambiguously
814
+ # positional in this case
815
+ # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
816
+ # ExtensionDtype]" has no attribute "subtype"
817
+ return self.dtype.subtype.kind in "mM" # type: ignore[union-attr]
818
+
819
+ def _maybe_cast_slice_bound(self, label, side: str):
820
+ return getattr(self, side)._maybe_cast_slice_bound(label, side)
821
+
822
+ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
823
+ if not isinstance(dtype, IntervalDtype):
824
+ return False
825
+ common_subtype = find_common_type([self.dtype, dtype])
826
+ return not is_object_dtype(common_subtype)
827
+
828
+ # --------------------------------------------------------------------
829
+
830
+ @cache_readonly
831
+ def left(self) -> Index:
832
+ return Index(self._data.left, copy=False)
833
+
834
+ @cache_readonly
835
+ def right(self) -> Index:
836
+ return Index(self._data.right, copy=False)
837
+
838
+ @cache_readonly
839
+ def mid(self) -> Index:
840
+ return Index(self._data.mid, copy=False)
841
+
842
+ @property
843
+ def length(self) -> Index:
844
+ return Index(self._data.length, copy=False)
845
+
846
+ # --------------------------------------------------------------------
847
+ # Set Operations
848
+
849
+ def _intersection(self, other, sort):
850
+ """
851
+ intersection specialized to the case with matching dtypes.
852
+ """
853
+ # For IntervalIndex we also know other.closed == self.closed
854
+ if self.left.is_unique and self.right.is_unique:
855
+ taken = self._intersection_unique(other)
856
+ elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
857
+ # Swap other/self if other is unique and self does not have
858
+ # multiple NaNs
859
+ taken = other._intersection_unique(self)
860
+ else:
861
+ # duplicates
862
+ taken = self._intersection_non_unique(other)
863
+
864
+ if sort is None:
865
+ taken = taken.sort_values()
866
+
867
+ return taken
868
+
869
+ def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:
870
+ """
871
+ Used when the IntervalIndex does not have any common endpoint,
872
+ no matter left or right.
873
+ Return the intersection with another IntervalIndex.
874
+ Parameters
875
+ ----------
876
+ other : IntervalIndex
877
+ Returns
878
+ -------
879
+ IntervalIndex
880
+ """
881
+ # Note: this is much more performant than super()._intersection(other)
882
+ lindexer = self.left.get_indexer(other.left)
883
+ rindexer = self.right.get_indexer(other.right)
884
+
885
+ match = (lindexer == rindexer) & (lindexer != -1)
886
+ indexer = lindexer.take(match.nonzero()[0])
887
+ indexer = unique(indexer)
888
+
889
+ return self.take(indexer)
890
+
891
+ def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:
892
+ """
893
+ Used when the IntervalIndex does have some common endpoints,
894
+ on either sides.
895
+ Return the intersection with another IntervalIndex.
896
+
897
+ Parameters
898
+ ----------
899
+ other : IntervalIndex
900
+
901
+ Returns
902
+ -------
903
+ IntervalIndex
904
+ """
905
+ # Note: this is about 3.25x faster than super()._intersection(other)
906
+ # in IntervalIndexMethod.time_intersection_both_duplicate(1000)
907
+ mask = np.zeros(len(self), dtype=bool)
908
+
909
+ if self.hasnans and other.hasnans:
910
+ first_nan_loc = np.arange(len(self))[self.isna()][0]
911
+ mask[first_nan_loc] = True
912
+
913
+ other_tups = set(zip(other.left, other.right))
914
+ for i, tup in enumerate(zip(self.left, self.right)):
915
+ if tup in other_tups:
916
+ mask[i] = True
917
+
918
+ return self[mask]
919
+
920
+ # --------------------------------------------------------------------
921
+
922
+ def _get_engine_target(self) -> np.ndarray:
923
+ # Note: we _could_ use libjoin functions by either casting to object
924
+ # dtype or constructing tuples (faster than constructing Intervals)
925
+ # but the libjoin fastpaths are no longer fast in these cases.
926
+ raise NotImplementedError(
927
+ "IntervalIndex does not use libjoin fastpaths or pass values to "
928
+ "IndexEngine objects"
929
+ )
930
+
931
+ def _from_join_target(self, result):
932
+ raise NotImplementedError("IntervalIndex does not use libjoin fastpaths")
933
+
934
+ # TODO: arithmetic operations
935
+
936
+
937
+ def _is_valid_endpoint(endpoint) -> bool:
938
+ """
939
+ Helper for interval_range to check if start/end are valid types.
940
+ """
941
+ return any(
942
+ [
943
+ is_number(endpoint),
944
+ isinstance(endpoint, Timestamp),
945
+ isinstance(endpoint, Timedelta),
946
+ endpoint is None,
947
+ ]
948
+ )
949
+
950
+
951
+ def _is_type_compatible(a, b) -> bool:
952
+ """
953
+ Helper for interval_range to check type compat of start/end/freq.
954
+ """
955
+ is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))
956
+ is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))
957
+ return (
958
+ (is_number(a) and is_number(b))
959
+ or (is_ts_compat(a) and is_ts_compat(b))
960
+ or (is_td_compat(a) and is_td_compat(b))
961
+ or com.any_none(a, b)
962
+ )
963
+
964
+
965
def interval_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    name: Hashable | None = None,
    closed: IntervalClosedType = "right",
) -> IntervalIndex:
    """
    Return a fixed frequency IntervalIndex.

    Parameters
    ----------
    start : numeric or datetime-like, default None
        Left bound for generating intervals.
    end : numeric or datetime-like, default None
        Right bound for generating intervals.
    periods : int, default None
        Number of periods to generate.
    freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None
        The length of each interval. Must be consistent with the type of start
        and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
        for numeric and 'D' for datetime-like.
    name : str, default None
        Name of the resulting IntervalIndex.
    closed : {'left', 'right', 'both', 'neither'}, default 'right'
        Whether the intervals are closed on the left-side, right-side, both
        or neither.

    Returns
    -------
    IntervalIndex

    See Also
    --------
    IntervalIndex : An Index of intervals that are all closed on the same side.

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``IntervalIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end``, inclusively.

    To learn more about datetime-like frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    Numeric ``start`` and ``end`` is supported.

    >>> pd.interval_range(start=0, end=5)
    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
                  dtype='interval[int64, right]')

    Datetime-like input is also supported, and ``freq`` may be a DateOffset
    alias such as ``'MS'``.

    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
    ...                   end=pd.Timestamp('2017-01-04'))
    IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00],
                   (2017-01-02 00:00:00, 2017-01-03 00:00:00],
                   (2017-01-03 00:00:00, 2017-01-04 00:00:00]],
                  dtype='interval[datetime64[ns], right]')

    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).

    >>> pd.interval_range(start=0, end=6, periods=4)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
              dtype='interval[float64, right]')

    The ``closed`` parameter controls which endpoints of the individual
    intervals within the ``IntervalIndex`` are closed.

    >>> pd.interval_range(end=5, periods=4, closed='both')
    IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
                  dtype='interval[int64, both]')
    """
    start = maybe_box_datetimelike(start)
    end = maybe_box_datetimelike(end)
    # Whichever endpoint was given determines numeric vs datetime-like flavor.
    endpoint = start if start is not None else end

    # Default freq only when it is the omitted parameter of the four.
    if freq is None and com.any_none(periods, start, end):
        freq = 1 if is_number(endpoint) else "D"

    if com.count_not_none(start, end, periods, freq) != 3:
        raise ValueError(
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )

    if not _is_valid_endpoint(start):
        raise ValueError(f"start must be numeric or datetime-like, got {start}")
    if not _is_valid_endpoint(end):
        raise ValueError(f"end must be numeric or datetime-like, got {end}")

    periods = validate_periods(periods)

    # Non-numeric freq must be convertible to a DateOffset.
    if freq is not None and not is_number(freq):
        try:
            freq = to_offset(freq)
        except ValueError as err:
            raise ValueError(
                f"freq must be numeric or convertible to DateOffset, got {freq}"
            ) from err

    # verify type compatibility between all specified parameters
    if not (
        _is_type_compatible(start, end)
        and _is_type_compatible(start, freq)
        and _is_type_compatible(end, freq)
    ):
        raise TypeError("start, end, freq need to be type compatible")

    # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
    if periods is not None:
        periods += 1

    breaks: np.ndarray | TimedeltaIndex | DatetimeIndex

    if is_number(endpoint):
        # numeric flavor: build breaks directly with numpy
        if com.all_not_none(start, end, freq):
            # adding freq * 0.1 ensures the stop value captures `end`
            breaks = np.arange(start, end + (freq * 0.1), freq)
        else:
            # compute the period/start/end if unspecified (at most one)
            if periods is None:
                periods = int((end - start) // freq) + 1
            elif start is None:
                start = end - (periods - 1) * freq
            elif end is None:
                end = start + (periods - 1) * freq

            breaks = np.linspace(start, end, periods)
            if all(is_integer(x) for x in com.not_none(start, end, freq)):
                # np.linspace always produces float output; restore int64
                # when every specified parameter was an integer.

                # error: Argument 1 to "maybe_downcast_numeric" has incompatible type
                # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]";
                # expected "ndarray[Any, Any]" [
                breaks = maybe_downcast_numeric(
                    breaks,  # type: ignore[arg-type]
                    np.dtype("int64"),
                )
    else:
        # datetime-like flavor: delegate to the appropriate range function
        if isinstance(endpoint, Timestamp):
            breaks = date_range(start=start, end=end, periods=periods, freq=freq)
        else:
            breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq)

    return IntervalIndex.from_breaks(breaks, name=name, closed=closed)