Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- videollama2/lib/python3.10/site-packages/pandas/core/__init__.py +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/accessor.py +340 -0
- videollama2/lib/python3.10/site-packages/pandas/core/algorithms.py +1747 -0
- videollama2/lib/python3.10/site-packages/pandas/core/api.py +140 -0
- videollama2/lib/python3.10/site-packages/pandas/core/generic.py +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/indexing.py +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/missing.py +1158 -0
- videollama2/lib/python3.10/site-packages/pandas/core/nanops.py +1748 -0
- videollama2/lib/python3.10/site-packages/pandas/core/resample.py +2920 -0
- videollama2/lib/python3.10/site-packages/pandas/core/sample.py +154 -0
- videollama2/lib/python3.10/site-packages/pandas/core/shared_docs.py +952 -0
- videollama2/lib/python3.10/site-packages/pandas/core/sorting.py +748 -0
- videollama2/lib/python3.10/site-packages/pandas/errors/__pycache__/__init__.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/__init__.py +12 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/__init__.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/api.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/frequencies.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/holiday.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/offsets.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/api.py +10 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/frequencies.py +602 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/holiday.py +634 -0
- videollama2/lib/python3.10/site-packages/pandas/tseries/offsets.py +91 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__init__.py +1 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/pack_invert.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/resize_buffers.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/window_pos.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/ycbcr_texture.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/pack_invert.py +37 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/resize_buffers.py +41 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/window_pos.py +76 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/ycbcr_texture.py +39 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/__init__.py +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/__pycache__/texture_stack.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/texture_stack.py +66 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/__init__.py +1 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/depth_clamp.py +54 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/framebuffer_multisample_coverage.py +45 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/gpu_program4.py +100 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/gpu_program5_mem_extended.py +62 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/occlusion_query.py +91 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_thread_shuffle.py +23 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_multisample.py +40 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader.py +198 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vdpau_interop.py +59 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_program1_1.py +48 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_program4.py +95 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/OES/__pycache__/__init__.cpython-310.pyc +0 -0
videollama2/lib/python3.10/site-packages/pandas/core/__init__.py
ADDED
|
File without changes
|
videollama2/lib/python3.10/site-packages/pandas/core/accessor.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
|
| 3 |
+
accessor.py contains base classes for implementing accessor properties
|
| 4 |
+
that can be mixed into or pinned onto other pandas classes.
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from typing import (
|
| 10 |
+
Callable,
|
| 11 |
+
final,
|
| 12 |
+
)
|
| 13 |
+
import warnings
|
| 14 |
+
|
| 15 |
+
from pandas.util._decorators import doc
|
| 16 |
+
from pandas.util._exceptions import find_stack_level
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class DirNamesMixin:
    """
    Mixin that filters accessor names in and hidden names out of ``dir()``.
    """

    # Accessor names registered on the class (shown only when present).
    _accessors: set[str] = set()
    # Attribute names that must never appear in ``dir()`` output.
    _hidden_attrs: frozenset[str] = frozenset()

    @final
    def _dir_deletions(self) -> set[str]:
        """
        Names that should be hidden from __dir__ for this object.
        """
        return self._accessors | self._hidden_attrs

    def _dir_additions(self) -> set[str]:
        """
        Extra names to expose in __dir__ for this object.
        """
        # Only advertise accessors that actually resolve on this instance.
        return {name for name in self._accessors if hasattr(self, name)}

    def __dir__(self) -> list[str]:
        """
        Provide method name lookup and completion.

        Notes
        -----
        Only 'public' methods are provided.
        """
        names = set(super().__dir__())
        names -= self._dir_deletions()
        names |= self._dir_additions()
        return sorted(names)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class PandasDelegate:
    """
    Abstract base class for delegating methods/properties.

    Subclasses override the ``_delegate_*`` hooks below; the default
    implementations refuse all access.
    """

    def _delegate_property_get(self, name: str, *args, **kwargs):
        raise TypeError(f"You cannot access the property {name}")

    def _delegate_property_set(self, name: str, value, *args, **kwargs):
        raise TypeError(f"The property {name} cannot be set")

    def _delegate_method(self, name: str, *args, **kwargs):
        raise TypeError(f"You cannot call method {name}")

    @classmethod
    def _add_delegate_accessors(
        cls,
        delegate,
        accessors: list[str],
        typ: str,
        overwrite: bool = False,
        accessor_mapping: Callable[[str], str] = lambda x: x,
        raise_on_missing: bool = True,
    ) -> None:
        """
        Add accessors to cls from the delegate class.

        Parameters
        ----------
        cls
            Class to add the methods/properties to.
        delegate
            Class to get methods/properties and doc-strings.
        accessors : list of str
            List of accessors to add.
        typ : {'property', 'method'}
        overwrite : bool, default False
            Overwrite the method/property in the target class if it exists.
        accessor_mapping: Callable, default lambda x: x
            Callable to map the delegate's function to the cls' function.
        raise_on_missing: bool, default True
            Raise if an accessor does not exist on delegate.
            False skips the missing accessor.
        """

        def _make_property(name: str):
            # Route attribute get/set through the _delegate_property_* hooks.
            def _getter(self):
                return self._delegate_property_get(name)

            def _setter(self, new_values):
                return self._delegate_property_set(name, new_values)

            _getter.__name__ = name
            _setter.__name__ = name

            # The docstring is copied from the delegate's attribute.
            return property(
                fget=_getter,
                fset=_setter,
                doc=getattr(delegate, accessor_mapping(name)).__doc__,
            )

        def _make_method(name: str):
            # Route calls through the _delegate_method hook.
            def f(self, *args, **kwargs):
                return self._delegate_method(name, *args, **kwargs)

            f.__name__ = name
            f.__doc__ = getattr(delegate, accessor_mapping(name)).__doc__

            return f

        # typ is constant across the loop, so pick the factory once.
        factory = _make_property if typ == "property" else _make_method

        for name in accessors:
            if (
                not raise_on_missing
                and getattr(delegate, accessor_mapping(name), None) is None
            ):
                # Caller asked for missing delegate attributes to be skipped.
                continue

            wrapped = factory(name)

            # don't overwrite existing methods/properties unless asked to
            if overwrite or not hasattr(cls, name):
                setattr(cls, name, wrapped)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def delegate_names(
    delegate,
    accessors: list[str],
    typ: str,
    overwrite: bool = False,
    accessor_mapping: Callable[[str], str] = lambda x: x,
    raise_on_missing: bool = True,
):
    """
    Add delegated names to a class using a class decorator.

    This provides an alternative to calling ``_add_delegate_accessors``
    directly below a class definition.

    Parameters
    ----------
    delegate : object
        The class to get methods/properties & doc-strings.
    accessors : Sequence[str]
        List of accessor to add.
    typ : {'property', 'method'}
    overwrite : bool, default False
        Overwrite the method/property in the target class if it exists.
    accessor_mapping: Callable, default lambda x: x
        Callable to map the delegate's function to the cls' function.
    raise_on_missing: bool, default True
        Raise if an accessor does not exist on delegate.
        False skips the missing accessor.

    Returns
    -------
    callable
        A class decorator.

    Examples
    --------
    @delegate_names(Categorical, ["categories", "ordered"], "property")
    class CategoricalAccessor(PandasDelegate):
        [...]
    """

    def add_delegate_accessors(cls):
        # Forward every option unchanged to the classmethod on the
        # decorated class, then hand the class back so decoration
        # composes normally.
        cls._add_delegate_accessors(
            delegate,
            accessors,
            typ,
            overwrite=overwrite,
            accessor_mapping=accessor_mapping,
            raise_on_missing=raise_on_missing,
        )
        return cls

    return add_delegate_accessors
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
# Ported with modifications from xarray; licence at LICENSES/XARRAY_LICENSE
|
| 191 |
+
# https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py
|
| 192 |
+
# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors
|
| 193 |
+
# 2. We use a UserWarning instead of a custom Warning
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class CachedAccessor:
    """
    Custom property-like object.

    A descriptor for caching accessors.

    Parameters
    ----------
    name : str
        Namespace that will be accessed under, e.g. ``df.foo``.
    accessor : cls
        Class with the extension methods.

    Notes
    -----
    For accessor, the class's ``__init__`` method assumes that one of
    ``Series``, ``DataFrame`` or ``Index`` as the single argument ``data``.
    """

    def __init__(self, name: str, accessor) -> None:
        self._name = name
        self._accessor = accessor

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access, i.e. Dataset.geo: return the accessor
            # class itself rather than constructing an instance.
            return self._accessor

        instance = self._accessor(obj)
        # Cache by shadowing this descriptor with the constructed accessor
        # on the instance. Inspired by:
        # https://www.pydanny.com/cached-property.html
        # object.__setattr__ is required because __setattr__ is overridden
        # on NDFrame.
        object.__setattr__(obj, self._name, instance)
        return instance
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
@doc(klass="", others="")
def _register_accessor(name: str, cls):
    """
    Register a custom accessor on {klass} objects.

    Parameters
    ----------
    name : str
        Name under which the accessor should be registered. A warning is issued
        if this name conflicts with a preexisting attribute.

    Returns
    -------
    callable
        A class decorator.

    See Also
    --------
    register_dataframe_accessor : Register a custom accessor on DataFrame objects.
    register_series_accessor : Register a custom accessor on Series objects.
    register_index_accessor : Register a custom accessor on Index objects.

    Notes
    -----
    When accessed, your accessor will be initialized with the pandas object
    the user is interacting with. So the signature must be

    .. code-block:: python

        def __init__(self, pandas_object):  # noqa: E999
            ...

    For consistency with pandas methods, you should raise an ``AttributeError``
    if the data passed to your accessor has an incorrect dtype.

    >>> pd.Series(['a', 'b']).dt
    Traceback (most recent call last):
    ...
    AttributeError: Can only use .dt accessor with datetimelike values

    Examples
    --------
    In your library code::

        import pandas as pd

        @pd.api.extensions.register_dataframe_accessor("geo")
        class GeoAccessor:
            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @property
            def center(self):
                # return the geographic center point of this DataFrame
                lat = self._obj.latitude
                lon = self._obj.longitude
                return (float(lon.mean()), float(lat.mean()))

            def plot(self):
                # plot this array's data on a map, e.g., using Cartopy
                pass

    Back in an interactive IPython session:

    .. code-block:: ipython

        In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10),
           ...:                    "latitude": np.linspace(0, 20)}})
        In [2]: ds.geo.center
        Out[2]: (5.0, 10.0)
        In [3]: ds.geo.plot()  # plots data on a map
    """

    def decorator(accessor):
        if hasattr(cls, name):
            # Registering over an existing attribute is permitted, but the
            # user is warned since it silently changes behavior of cls.
            msg = (
                f"registration of accessor {repr(accessor)} under name "
                f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
                f"attribute with the same name."
            )
            warnings.warn(msg, UserWarning, stacklevel=find_stack_level())
        setattr(cls, name, CachedAccessor(name, accessor))
        cls._accessors.add(name)
        return accessor

    return decorator
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
# Thin public wrapper: the @doc decorator presumably re-renders
# _register_accessor's docstring template with klass="DataFrame" — confirm
# against pandas.util._decorators.doc.
@doc(_register_accessor, klass="DataFrame")
def register_dataframe_accessor(name: str):
    # Local import — presumably to avoid a circular import at module load
    # time (pandas imports this module early); TODO confirm.
    from pandas import DataFrame

    return _register_accessor(name, DataFrame)
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
# Thin public wrapper: the @doc decorator presumably re-renders
# _register_accessor's docstring template with klass="Series" — confirm
# against pandas.util._decorators.doc.
@doc(_register_accessor, klass="Series")
def register_series_accessor(name: str):
    # Local import — presumably to avoid a circular import at module load
    # time; TODO confirm.
    from pandas import Series

    return _register_accessor(name, Series)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
# Thin public wrapper: the @doc decorator presumably re-renders
# _register_accessor's docstring template with klass="Index" — confirm
# against pandas.util._decorators.doc.
@doc(_register_accessor, klass="Index")
def register_index_accessor(name: str):
    # Local import — presumably to avoid a circular import at module load
    # time; TODO confirm.
    from pandas import Index

    return _register_accessor(name, Index)
|
videollama2/lib/python3.10/site-packages/pandas/core/algorithms.py
ADDED
|
@@ -0,0 +1,1747 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Generic data algorithms. This module is experimental at the moment and not
|
| 3 |
+
intended for public consumption
|
| 4 |
+
"""
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import decimal
|
| 8 |
+
import operator
|
| 9 |
+
from textwrap import dedent
|
| 10 |
+
from typing import (
|
| 11 |
+
TYPE_CHECKING,
|
| 12 |
+
Literal,
|
| 13 |
+
cast,
|
| 14 |
+
)
|
| 15 |
+
import warnings
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from pandas._libs import (
|
| 20 |
+
algos,
|
| 21 |
+
hashtable as htable,
|
| 22 |
+
iNaT,
|
| 23 |
+
lib,
|
| 24 |
+
)
|
| 25 |
+
from pandas._typing import (
|
| 26 |
+
AnyArrayLike,
|
| 27 |
+
ArrayLike,
|
| 28 |
+
AxisInt,
|
| 29 |
+
DtypeObj,
|
| 30 |
+
TakeIndexer,
|
| 31 |
+
npt,
|
| 32 |
+
)
|
| 33 |
+
from pandas.util._decorators import doc
|
| 34 |
+
from pandas.util._exceptions import find_stack_level
|
| 35 |
+
|
| 36 |
+
from pandas.core.dtypes.cast import (
|
| 37 |
+
construct_1d_object_array_from_listlike,
|
| 38 |
+
np_find_common_type,
|
| 39 |
+
)
|
| 40 |
+
from pandas.core.dtypes.common import (
|
| 41 |
+
ensure_float64,
|
| 42 |
+
ensure_object,
|
| 43 |
+
ensure_platform_int,
|
| 44 |
+
is_array_like,
|
| 45 |
+
is_bool_dtype,
|
| 46 |
+
is_complex_dtype,
|
| 47 |
+
is_dict_like,
|
| 48 |
+
is_extension_array_dtype,
|
| 49 |
+
is_float_dtype,
|
| 50 |
+
is_integer,
|
| 51 |
+
is_integer_dtype,
|
| 52 |
+
is_list_like,
|
| 53 |
+
is_object_dtype,
|
| 54 |
+
is_signed_integer_dtype,
|
| 55 |
+
needs_i8_conversion,
|
| 56 |
+
)
|
| 57 |
+
from pandas.core.dtypes.concat import concat_compat
|
| 58 |
+
from pandas.core.dtypes.dtypes import (
|
| 59 |
+
BaseMaskedDtype,
|
| 60 |
+
CategoricalDtype,
|
| 61 |
+
ExtensionDtype,
|
| 62 |
+
NumpyEADtype,
|
| 63 |
+
)
|
| 64 |
+
from pandas.core.dtypes.generic import (
|
| 65 |
+
ABCDatetimeArray,
|
| 66 |
+
ABCExtensionArray,
|
| 67 |
+
ABCIndex,
|
| 68 |
+
ABCMultiIndex,
|
| 69 |
+
ABCSeries,
|
| 70 |
+
ABCTimedeltaArray,
|
| 71 |
+
)
|
| 72 |
+
from pandas.core.dtypes.missing import (
|
| 73 |
+
isna,
|
| 74 |
+
na_value_for_dtype,
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
from pandas.core.array_algos.take import take_nd
|
| 78 |
+
from pandas.core.construction import (
|
| 79 |
+
array as pd_array,
|
| 80 |
+
ensure_wrapped_if_datetimelike,
|
| 81 |
+
extract_array,
|
| 82 |
+
)
|
| 83 |
+
from pandas.core.indexers import validate_indices
|
| 84 |
+
|
| 85 |
+
if TYPE_CHECKING:
|
| 86 |
+
from pandas._typing import (
|
| 87 |
+
ListLike,
|
| 88 |
+
NumpySorter,
|
| 89 |
+
NumpyValueArrayLike,
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
from pandas import (
|
| 93 |
+
Categorical,
|
| 94 |
+
Index,
|
| 95 |
+
Series,
|
| 96 |
+
)
|
| 97 |
+
from pandas.core.arrays import (
|
| 98 |
+
BaseMaskedArray,
|
| 99 |
+
ExtensionArray,
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# --------------- #
|
| 104 |
+
# dtype access #
|
| 105 |
+
# --------------- #
|
| 106 |
+
def _ensure_data(values: ArrayLike) -> np.ndarray:
    """
    routine to ensure that our data is of the correct
    input dtype for lower-level routines

    This will coerce:
    - ints -> int64
    - uint -> uint64
    - bool -> uint8
    - datetimelike -> i8
    - datetime64tz -> i8 (in local tz)
    - categorical -> codes

    Parameters
    ----------
    values : np.ndarray or ExtensionArray

    Returns
    -------
    np.ndarray
    """

    if not isinstance(values, ABCMultiIndex):
        # extract_array would raise
        values = extract_array(values, extract_numpy=True)

    if is_object_dtype(values.dtype):
        return ensure_object(np.asarray(values))

    elif isinstance(values.dtype, BaseMaskedDtype):
        # i.e. BooleanArray, FloatingArray, IntegerArray
        values = cast("BaseMaskedArray", values)
        if not values._hasna:
            # No pd.NAs -> We can avoid an object-dtype cast (and copy) GH#41816
            # recurse to avoid re-implementing logic for eg bool->uint8
            return _ensure_data(values._data)
        # NAs present: fall back to an object-dtype ndarray so they survive.
        return np.asarray(values)

    elif isinstance(values.dtype, CategoricalDtype):
        # NB: cases that go through here should NOT be using _reconstruct_data
        # on the back-end.
        values = cast("Categorical", values)
        return values.codes

    elif is_bool_dtype(values.dtype):
        if isinstance(values, np.ndarray):
            # i.e. actually dtype == np.dtype("bool")
            return np.asarray(values).view("uint8")
        else:
            # e.g. Sparse[bool, False]  # TODO: no test cases get here
            return np.asarray(values).astype("uint8", copy=False)

    elif is_integer_dtype(values.dtype):
        return np.asarray(values)

    elif is_float_dtype(values.dtype):
        # Note: checking `values.dtype == "float128"` raises on Windows and 32bit
        # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
        # has no attribute "itemsize"
        if values.dtype.itemsize in [2, 12, 16]:  # type: ignore[union-attr]
            # we dont (yet) have float128 hashtable support
            return ensure_float64(values)
        return np.asarray(values)

    elif is_complex_dtype(values.dtype):
        return cast(np.ndarray, values)

    # datetimelike
    elif needs_i8_conversion(values.dtype):
        npvalues = values.view("i8")
        npvalues = cast(np.ndarray, npvalues)
        return npvalues

    # we have failed, return object
    values = np.asarray(values, dtype=object)
    return ensure_object(values)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _reconstruct_data(
    values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
    """
    reverse of _ensure_data

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    dtype : np.dtype or ExtensionDtype
        The dtype to restore (typically ``original.dtype``).
    original : AnyArrayLike
        The pre-``_ensure_data`` object; only its dtype/class is relevant here.

    Returns
    -------
    ExtensionArray or np.ndarray
    """
    if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
        # Catch DatetimeArray/TimedeltaArray
        return values

    if not isinstance(dtype, np.dtype):
        # i.e. ExtensionDtype; note we have ruled out above the possibility
        # that values.dtype == dtype
        cls = dtype.construct_array_type()

        values = cls._from_sequence(values, dtype=dtype)

    else:
        # plain numpy dtype: a view-free cast back is sufficient
        values = values.astype(dtype, copy=False)

    return values
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def _ensure_arraylike(values, func_name: str) -> ArrayLike:
    """
    Ensure that we are arraylike if not already.

    Parameters
    ----------
    values : object
        Anything list-like; non-array inputs are coerced to an ndarray
        (with a deprecation warning, except for isin's targets).
    func_name : str
        Name of the calling function, used in the deprecation message.
        The sentinel "isin-targets" suppresses the warning.

    Returns
    -------
    np.ndarray, ExtensionArray, Index, or Series
    """
    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
        # GH#52986
        if func_name != "isin-targets":
            # Make an exception for the comps argument in isin.
            warnings.warn(
                # Fixed typo: message previously read "is not not a Series".
                f"{func_name} with argument that is not a Series, Index, "
                "ExtensionArray, or np.ndarray is deprecated and will raise in a "
                "future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        inferred = lib.infer_dtype(values, skipna=False)
        if inferred in ["mixed", "string", "mixed-integer"]:
            # "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
            if isinstance(values, tuple):
                values = list(values)
            values = construct_1d_object_array_from_listlike(values)
        else:
            values = np.asarray(values)
    return values
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# Mapping from the dtype names produced by _ensure_data/_check_object_for_strings
# to the corresponding cython HashTable implementation.
_hashtables = {
    "complex128": htable.Complex128HashTable,
    "complex64": htable.Complex64HashTable,
    "float64": htable.Float64HashTable,
    "float32": htable.Float32HashTable,
    "uint64": htable.UInt64HashTable,
    "uint32": htable.UInt32HashTable,
    "uint16": htable.UInt16HashTable,
    "uint8": htable.UInt8HashTable,
    "int64": htable.Int64HashTable,
    "int32": htable.Int32HashTable,
    "int16": htable.Int16HashTable,
    "int8": htable.Int8HashTable,
    "string": htable.StringHashTable,
    "object": htable.PyObjectHashTable,
}
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _get_hashtable_algo(values: np.ndarray):
    """
    Select the HashTable subclass appropriate for ``values``.

    Parameters
    ----------
    values : np.ndarray

    Returns
    -------
    htable : HashTable subclass
    values : ndarray
        The input coerced by ``_ensure_data`` to the dtype the table expects.
    """
    values = _ensure_data(values)

    # object arrays of all-strings get the cheaper StringHashTable
    ndtype = _check_object_for_strings(values)
    hashtable = _hashtables[ndtype]
    return hashtable, values
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def _check_object_for_strings(values: np.ndarray) -> str:
    """
    Check if we can use string hashtable instead of object hashtable.

    Parameters
    ----------
    values : ndarray

    Returns
    -------
    str
        Either the original dtype name or "string" when every element
        is a string.
    """
    ndtype = values.dtype.name
    if ndtype == "object":
        # it's cheaper to use a String Hash Table than Object; we infer
        # including nulls because that is the only difference between
        # StringHashTable and ObjectHashtable
        if lib.is_string_array(values, skipna=False):
            ndtype = "string"
    return ndtype
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# --------------- #
|
| 303 |
+
# top-level algos #
|
| 304 |
+
# --------------- #
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def unique(values):
    """
    Return unique values based on a hash table.

    Uniques are returned in order of appearance. This does NOT sort.

    Significantly faster than numpy.unique for long enough sequences.
    Includes NA values.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    numpy.ndarray or ExtensionArray

        The return can be:

        * Index : when the input is an Index
        * Categorical : when the input is a Categorical dtype
        * ndarray : when the input is a Series/ndarray

        Return numpy.ndarray or ExtensionArray.

    See Also
    --------
    Index.unique : Return unique values from an Index.
    Series.unique : Return unique values of Series object.

    Examples
    --------
    >>> pd.unique(pd.Series([2, 1, 3, 3]))
    array([2, 1, 3])

    >>> pd.unique(pd.Series([2] + [1] * 5))
    array([2, 1])

    >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
    array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')

    >>> pd.unique(
    ...     pd.Series(
    ...         [
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...         ]
    ...     )
    ... )
    <DatetimeArray>
    ['2016-01-01 00:00:00-05:00']
    Length: 1, dtype: datetime64[ns, US/Eastern]

    >>> pd.unique(
    ...     pd.Index(
    ...         [
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...         ]
    ...     )
    ... )
    DatetimeIndex(['2016-01-01 00:00:00-05:00'],
            dtype='datetime64[ns, US/Eastern]',
            freq=None)

    >>> pd.unique(np.array(list("baabc"), dtype="O"))
    array(['b', 'a', 'c'], dtype=object)

    An unordered Categorical will return categories in the
    order of appearance.

    >>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
    ['b', 'a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
    ['b', 'a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    An ordered Categorical preserves the category ordering.

    >>> pd.unique(
    ...     pd.Series(
    ...         pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
    ...     )
    ... )
    ['b', 'a', 'c']
    Categories (3, object): ['a' < 'b' < 'c']

    An array of tuples

    >>> pd.unique(pd.Series([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]).values)
    array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
    """
    # Thin wrapper: the real work (hashtable dispatch, reconstruction)
    # lives in unique_with_mask.
    return unique_with_mask(values)
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def nunique_ints(values: ArrayLike) -> int:
    """
    Return the number of unique values for integer array-likes.

    Significantly faster than pandas.unique for long enough sequences.
    No checks are done to ensure input is integral.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    int : The number of unique values in ``values``
    """
    if not len(values):
        return 0
    data = _ensure_data(values)
    # bincount requires intp; a nonzero bin means the value occurs at least once
    occupied_bins = np.bincount(data.ravel().astype("intp")) != 0
    return occupied_bins.sum()
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None):
    """See algorithms.unique for docs. Takes a mask for masked arrays."""
    values = _ensure_arraylike(values, func_name="unique")

    if isinstance(values.dtype, ExtensionDtype):
        # Dispatch to extension dtype's unique.
        return values.unique()

    original = values
    hashtable, values = _get_hashtable_algo(values)

    table = hashtable(len(values))
    if mask is None:
        # plain ndarray path: returns only the uniques
        uniques = table.unique(values)
        uniques = _reconstruct_data(uniques, original.dtype, original)
        return uniques

    else:
        # masked path: also returns a mask aligned with the uniques
        uniques, mask = table.unique(values, mask=mask)
        uniques = _reconstruct_data(uniques, original.dtype, original)
        assert mask is not None  # for mypy
        return uniques, mask.astype("bool")
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
# Backwards-compatible alias; `unique` already operates on 1-D input.
unique1d = unique


# Minimum length of `comps` before isin() considers the np.isin fast path
# over a hashtable membership check.
_MINIMUM_COMP_ARR_LEN = 1_000_000
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]:
    """
    Compute the isin boolean array.

    Parameters
    ----------
    comps : list-like
    values : list-like

    Returns
    -------
    ndarray[bool]
        Same length as `comps`.
    """
    if not is_list_like(comps):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a `{type(comps).__name__}`"
        )
    if not is_list_like(values):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a `{type(values).__name__}`"
        )

    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
        orig_values = list(values)
        values = _ensure_arraylike(orig_values, func_name="isin-targets")

        if (
            len(values) > 0
            and values.dtype.kind in "iufcb"
            and not is_signed_integer_dtype(comps)
        ):
            # GH#46485 Use object to avoid upcast to float64 later
            # TODO: Share with _find_common_type_compat
            values = construct_1d_object_array_from_listlike(orig_values)

    elif isinstance(values, ABCMultiIndex):
        # Avoid raising in extract_array
        values = np.array(values)
    else:
        values = extract_array(values, extract_numpy=True, extract_range=True)

    comps_array = _ensure_arraylike(comps, func_name="isin")
    comps_array = extract_array(comps_array, extract_numpy=True)
    if not isinstance(comps_array, np.ndarray):
        # i.e. Extension Array
        return comps_array.isin(values)

    elif needs_i8_conversion(comps_array.dtype):
        # Dispatch to DatetimeLikeArrayMixin.isin
        return pd_array(comps_array).isin(values)
    elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps_array.dtype):
        # e.g. comps_array are integers and values are datetime64s
        return np.zeros(comps_array.shape, dtype=bool)
        # TODO: not quite right ... Sparse/Categorical
    elif needs_i8_conversion(values.dtype):
        # retry with the datetimelike targets cast to object
        return isin(comps_array, values.astype(object))

    elif isinstance(values.dtype, ExtensionDtype):
        # retry with both sides as plain ndarrays
        return isin(np.asarray(comps_array), np.asarray(values))

    # GH16012
    # Ensure np.isin doesn't get object types or it *may* throw an exception
    # Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
    # isin is faster for small sizes
    if (
        len(comps_array) > _MINIMUM_COMP_ARR_LEN
        and len(values) <= 26
        and comps_array.dtype != object
    ):
        # If the values include nan we need to check for nan explicitly
        # since np.nan is not equal to np.nan
        if isna(values).any():

            def f(c, v):
                return np.logical_or(np.isin(c, v).ravel(), np.isnan(c))

        else:
            f = lambda a, b: np.isin(a, b).ravel()

    else:
        # hashtable path: cast both sides to a common dtype first
        common = np_find_common_type(values.dtype, comps_array.dtype)
        values = values.astype(common, copy=False)
        comps_array = comps_array.astype(common, copy=False)
        f = htable.ismember

    return f(comps_array, values)
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
def factorize_array(
    values: np.ndarray,
    use_na_sentinel: bool = True,
    size_hint: int | None = None,
    na_value: object = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[npt.NDArray[np.intp], np.ndarray]:
    """
    Factorize a numpy array to codes and uniques.

    This doesn't do any coercion of types or unboxing before factorization.

    Parameters
    ----------
    values : ndarray
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.
    size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method
    na_value : object, optional
        A value in `values` to consider missing. Note: only use this
        parameter when you know that you don't have any values pandas would
        consider missing in the array (NaN for float data, iNaT for
        datetimes, etc.).
    mask : ndarray[bool], optional
        If not None, the mask is used as indicator for missing values
        (True = missing, False = valid) instead of `na_value` or
        condition "val != val".

    Returns
    -------
    codes : ndarray[np.intp]
    uniques : ndarray
    """
    original = values
    if values.dtype.kind in "mM":
        # _get_hashtable_algo will cast dt64/td64 to i8 via _ensure_data, so we
        # need to do the same to na_value. We are assuming here that the passed
        # na_value is an appropriately-typed NaT.
        # e.g. test_where_datetimelike_categorical
        na_value = iNaT

    hash_klass, values = _get_hashtable_algo(values)

    table = hash_klass(size_hint or len(values))
    uniques, codes = table.factorize(
        values,
        na_sentinel=-1,
        na_value=na_value,
        mask=mask,
        ignore_na=use_na_sentinel,
    )

    # re-cast e.g. i8->dt64/td64, uint8->bool
    uniques = _reconstruct_data(uniques, original.dtype, original)

    codes = ensure_platform_int(codes)
    return codes, uniques
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
@doc(
    values=dedent(
        """\
    values : sequence
        A 1-D sequence. Sequences that aren't pandas objects are
        coerced to ndarrays before factorization.
    """
    ),
    sort=dedent(
        """\
    sort : bool, default False
        Sort `uniques` and shuffle `codes` to maintain the
        relationship.
    """
    ),
    size_hint=dedent(
        """\
    size_hint : int, optional
        Hint to the hashtable sizer.
    """
    ),
)
def factorize(
    values,
    sort: bool = False,
    use_na_sentinel: bool = True,
    size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
    """
    Encode the object as an enumerated type or categorical variable.

    This method is useful for obtaining a numeric representation of an
    array when all that matters is identifying distinct values. `factorize`
    is available as both a top-level function :func:`pandas.factorize`,
    and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.

    Parameters
    ----------
    {values}{sort}
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.

        .. versionadded:: 1.5.0
    {size_hint}\

    Returns
    -------
    codes : ndarray
        An integer ndarray that's an indexer into `uniques`.
        ``uniques.take(codes)`` will have the same values as `values`.
    uniques : ndarray, Index, or Categorical
        The unique valid values. When `values` is Categorical, `uniques`
        is a Categorical. When `values` is some other pandas object, an
        `Index` is returned. Otherwise, a 1-D ndarray is returned.

        .. note::

            Even if there's a missing value in `values`, `uniques` will
            *not* contain an entry for it.

    See Also
    --------
    cut : Discretize continuous-valued array.
    unique : Find the unique value in an array.

    Notes
    -----
    Reference :ref:`the user guide <reshaping.factorize>` for more examples.

    Examples
    --------
    These examples all show factorize as a top-level method like
    ``pd.factorize(values)``. The results are identical for methods like
    :meth:`Series.factorize`.

    >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"))
    >>> codes
    array([0, 0, 1, 2, 0])
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)

    With ``sort=True``, the `uniques` will be sorted, and `codes` will be
    shuffled so that the relationship is maintained.

    >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"),
    ...                               sort=True)
    >>> codes
    array([1, 1, 0, 2, 1])
    >>> uniques
    array(['a', 'b', 'c'], dtype=object)

    When ``use_na_sentinel=True`` (the default), missing values are indicated in
    the `codes` with the sentinel value ``-1`` and missing values are not
    included in `uniques`.

    >>> codes, uniques = pd.factorize(np.array(['b', None, 'a', 'c', 'b'], dtype="O"))
    >>> codes
    array([ 0, -1,  1,  2,  0])
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)

    Thus far, we've only factorized lists (which are internally coerced to
    NumPy arrays). When factorizing pandas objects, the type of `uniques`
    will differ. For Categoricals, a `Categorical` is returned.

    >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1])
    >>> uniques
    ['a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    Notice that ``'b'`` is in ``uniques.categories``, despite not being
    present in ``cat.values``.

    For all other pandas objects, an Index of the appropriate type is
    returned.

    >>> cat = pd.Series(['a', 'a', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1])
    >>> uniques
    Index(['a', 'c'], dtype='object')

    If NaN is in the values, and we want to include NaN in the uniques of the
    values, it can be achieved by setting ``use_na_sentinel=False``.

    >>> values = np.array([1, 2, 1, np.nan])
    >>> codes, uniques = pd.factorize(values)  # default: use_na_sentinel=True
    >>> codes
    array([ 0,  1,  0, -1])
    >>> uniques
    array([1., 2.])

    >>> codes, uniques = pd.factorize(values, use_na_sentinel=False)
    >>> codes
    array([0, 1, 0, 2])
    >>> uniques
    array([ 1.,  2., nan])
    """
    # Implementation notes: This method is responsible for 3 things
    # 1.) coercing data to array-like (ndarray, Index, extension array)
    # 2.) factorizing codes and uniques
    # 3.) Maybe boxing the uniques in an Index
    #
    # Step 2 is dispatched to extension types (like Categorical). They are
    # responsible only for factorization. All data coercion, sorting and boxing
    # should happen here.
    if isinstance(values, (ABCIndex, ABCSeries)):
        return values.factorize(sort=sort, use_na_sentinel=use_na_sentinel)

    values = _ensure_arraylike(values, func_name="factorize")
    original = values

    if (
        isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
        and values.freq is not None
    ):
        # The presence of 'freq' means we can fast-path sorting and know there
        # aren't NAs
        codes, uniques = values.factorize(sort=sort)
        return codes, uniques

    elif not isinstance(values, np.ndarray):
        # i.e. ExtensionArray
        codes, uniques = values.factorize(use_na_sentinel=use_na_sentinel)

    else:
        values = np.asarray(values)  # convert DTA/TDA/MultiIndex

        if not use_na_sentinel and values.dtype == object:
            # factorize can now handle differentiating various types of null values.
            # These can only occur when the array has object dtype.
            # However, for backwards compatibility we only use the null for the
            # provided dtype. This may be revisited in the future, see GH#48476.
            null_mask = isna(values)
            if null_mask.any():
                na_value = na_value_for_dtype(values.dtype, compat=False)
                # Don't modify (potentially user-provided) array
                values = np.where(null_mask, na_value, values)

        codes, uniques = factorize_array(
            values,
            use_na_sentinel=use_na_sentinel,
            size_hint=size_hint,
        )

    if sort and len(uniques) > 0:
        uniques, codes = safe_sort(
            uniques,
            codes,
            use_na_sentinel=use_na_sentinel,
            assume_unique=True,
            verify=False,
        )

    uniques = _reconstruct_data(uniques, original.dtype, original)

    return codes, uniques
|
| 813 |
+
|
| 814 |
+
|
| 815 |
+
def value_counts(
    values,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    """
    Compute a histogram of the counts of non-null values.

    .. deprecated::
        Emits a FutureWarning; use ``pd.Series(obj).value_counts()`` instead.

    Parameters
    ----------
    values : ndarray (1-d)
    sort : bool, default True
        Sort by values
    ascending : bool, default False
        Sort in ascending order
    normalize: bool, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : bool, default True
        Don't include counts of NaN

    Returns
    -------
    Series
    """
    warnings.warn(
        # GH#53493
        "pandas.value_counts is deprecated and will be removed in a "
        "future version. Use pd.Series(obj).value_counts() instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    # Deprecated thin wrapper: all logic lives in value_counts_internal.
    return value_counts_internal(
        values,
        sort=sort,
        ascending=ascending,
        normalize=normalize,
        bins=bins,
        dropna=dropna,
    )
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
def value_counts_internal(
    values,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    """
    Shared implementation behind pd.value_counts / Series.value_counts.

    See ``value_counts`` for parameter descriptions; returns a Series of
    counts (or proportions when ``normalize=True``) indexed by the values.
    """
    from pandas import (
        Index,
        Series,
    )

    index_name = getattr(values, "name", None)
    name = "proportion" if normalize else "count"

    if bins is not None:
        from pandas.core.reshape.tile import cut

        if isinstance(values, Series):
            values = values._values

        try:
            ii = cut(values, bins, include_lowest=True)
        except TypeError as err:
            raise TypeError("bins argument only works with numeric data.") from err

        # count, remove nulls (from the index), and add the bins
        result = ii.value_counts(dropna=dropna)
        result.name = name
        result = result[result.index.notna()]
        result.index = result.index.astype("interval")
        result = result.sort_index()

        # if we are dropna and we have NO values
        if dropna and (result._values == 0).all():
            result = result.iloc[0:0]

        # normalizing is by len of all (regardless of dropna)
        counts = np.array([len(ii)])

    else:
        if is_extension_array_dtype(values):
            # handle Categorical and sparse,
            result = Series(values, copy=False)._values.value_counts(dropna=dropna)
            result.name = name
            result.index.name = index_name
            counts = result._values
            if not isinstance(counts, np.ndarray):
                # e.g. ArrowExtensionArray
                counts = np.asarray(counts)

        elif isinstance(values, ABCMultiIndex):
            # GH49558
            levels = list(range(values.nlevels))
            result = (
                Series(index=values, name=name)
                .groupby(level=levels, dropna=dropna)
                .size()
            )
            result.index.names = values.names
            counts = result._values

        else:
            values = _ensure_arraylike(values, func_name="value_counts")
            keys, counts, _ = value_counts_arraylike(values, dropna)
            if keys.dtype == np.float16:
                keys = keys.astype(np.float32)

            # For backwards compatibility, we let Index do its normal type
            # inference, _except_ for if it infers from object to bool.
            idx = Index(keys)
            if idx.dtype == bool and keys.dtype == object:
                idx = idx.astype(object)
            elif (
                idx.dtype != keys.dtype  # noqa: PLR1714  # pylint: disable=R1714
                and idx.dtype != "string[pyarrow_numpy]"
            ):
                warnings.warn(
                    # GH#56161
                    "The behavior of value_counts with object-dtype is deprecated. "
                    "In a future version, this will *not* perform dtype inference "
                    "on the resulting index. To retain the old behavior, use "
                    "`result.index = result.index.infer_objects()`",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            idx.name = index_name

            result = Series(counts, index=idx, name=name, copy=False)

    if sort:
        result = result.sort_values(ascending=ascending)

    if normalize:
        result = result / counts.sum()

    return result
|
| 960 |
+
|
| 961 |
+
|
| 962 |
+
# Called once from SparseArray, otherwise could be private
def value_counts_arraylike(
    values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
) -> tuple[ArrayLike, npt.NDArray[np.int64], int]:
    """
    Parameters
    ----------
    values : np.ndarray
    dropna : bool
    mask : np.ndarray[bool] or None, default None

    Returns
    -------
    uniques : np.ndarray
    counts : np.ndarray[np.int64]
    na_counter : int
        Number of NA values observed by the hashtable.
    """
    original = values
    values = _ensure_data(values)

    keys, counts, na_counter = htable.value_count(values, dropna, mask=mask)

    if needs_i8_conversion(original.dtype):
        # datetime, timedelta, or period

        if dropna:
            # drop the iNaT (NaT-as-i8) entry from the results
            mask = keys != iNaT
            keys, counts = keys[mask], counts[mask]

    res_keys = _reconstruct_data(keys, original.dtype, original)
    return res_keys, counts, na_counter
|
| 992 |
+
|
| 993 |
+
|
| 994 |
+
def duplicated(
|
| 995 |
+
values: ArrayLike,
|
| 996 |
+
keep: Literal["first", "last", False] = "first",
|
| 997 |
+
mask: npt.NDArray[np.bool_] | None = None,
|
| 998 |
+
) -> npt.NDArray[np.bool_]:
|
| 999 |
+
"""
|
| 1000 |
+
Return boolean ndarray denoting duplicate values.
|
| 1001 |
+
|
| 1002 |
+
Parameters
|
| 1003 |
+
----------
|
| 1004 |
+
values : np.ndarray or ExtensionArray
|
| 1005 |
+
Array over which to check for duplicate values.
|
| 1006 |
+
keep : {'first', 'last', False}, default 'first'
|
| 1007 |
+
- ``first`` : Mark duplicates as ``True`` except for the first
|
| 1008 |
+
occurrence.
|
| 1009 |
+
- ``last`` : Mark duplicates as ``True`` except for the last
|
| 1010 |
+
occurrence.
|
| 1011 |
+
- False : Mark all duplicates as ``True``.
|
| 1012 |
+
mask : ndarray[bool], optional
|
| 1013 |
+
array indicating which elements to exclude from checking
|
| 1014 |
+
|
| 1015 |
+
Returns
|
| 1016 |
+
-------
|
| 1017 |
+
duplicated : ndarray[bool]
|
| 1018 |
+
"""
|
| 1019 |
+
values = _ensure_data(values)
|
| 1020 |
+
return htable.duplicated(values, keep=keep, mask=mask)
|
| 1021 |
+
|
| 1022 |
+
|
| 1023 |
+
def mode(
    values: ArrayLike, dropna: bool = True, mask: npt.NDArray[np.bool_] | None = None
) -> ArrayLike:
    """
    Returns the mode(s) of an array.

    Parameters
    ----------
    values : array-like
        Array over which to check for duplicate values.
    dropna : bool, default True
        Don't consider counts of NaN/NaT.
    mask : np.ndarray[bool] or None, default None
        Forwarded to the hashtable routine; marks positions to treat as NA.

    Returns
    -------
    np.ndarray or ExtensionArray
        NOTE(review): when the hashtable returns a result mask (masked-array
        path), a 2-tuple ``(ndarray, mask)`` is returned instead — callers
        passing ``mask`` must expect that shape.
    """
    values = _ensure_arraylike(values, func_name="mode")
    original = values

    if needs_i8_conversion(values.dtype):
        # Got here with ndarray; dispatch to DatetimeArray/TimedeltaArray.
        values = ensure_wrapped_if_datetimelike(values)
        values = cast("ExtensionArray", values)
        return values._mode(dropna=dropna)

    values = _ensure_data(values)

    npresult, res_mask = htable.mode(values, dropna=dropna, mask=mask)
    if res_mask is not None:
        # Masked path: return the raw result plus its mask (see docstring note).
        return npresult, res_mask  # type: ignore[return-value]

    try:
        npresult = np.sort(npresult)
    except TypeError as err:
        # e.g. mixed object dtypes that cannot be compared; leave unsorted.
        warnings.warn(
            f"Unable to sort modes: {err}",
            stacklevel=find_stack_level(),
        )

    # Restore the original dtype (e.g. categorical/extension) before returning.
    result = _reconstruct_data(npresult, original.dtype, original)
    return result
|
| 1065 |
+
|
| 1066 |
+
|
| 1067 |
+
def rank(
    values: ArrayLike,
    axis: AxisInt = 0,
    method: str = "average",
    na_option: str = "keep",
    ascending: bool = True,
    pct: bool = False,
) -> npt.NDArray[np.float64]:
    """
    Rank the values along a given axis.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        Array whose values will be ranked. The number of dimensions in this
        array must not exceed 2.
    axis : int, default 0
        Axis over which to perform rankings.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which tiebreaks are broken during the ranking.
    na_option : {'keep', 'top'}, default 'keep'
        The method by which NaNs are placed in the ranking.
        - ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
    ascending : bool, default True
        Whether or not the elements should be ranked in ascending order.
    pct : bool, default False
        Whether or not to display the returned rankings in integer form
        (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).

    Returns
    -------
    np.ndarray[float64]
        Ranks with the same shape as ``values``.

    Raises
    ------
    TypeError
        If ``values`` has more than 2 dimensions.
    """
    # datetimelike values are ranked on their i8 view; the flag tells the
    # cython routine how to interpret the NA sentinel.
    is_datetimelike = needs_i8_conversion(values.dtype)
    values = _ensure_data(values)

    if values.ndim == 1:
        ranks = algos.rank_1d(
            values,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    elif values.ndim == 2:
        ranks = algos.rank_2d(
            values,
            axis=axis,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    else:
        raise TypeError("Array with ndim > 2 are not supported.")

    return ranks
|
| 1124 |
+
|
| 1125 |
+
|
| 1126 |
+
# ---- #
|
| 1127 |
+
# take #
|
| 1128 |
+
# ---- #
|
| 1129 |
+
|
| 1130 |
+
|
| 1131 |
+
def take(
    arr,
    indices: TakeIndexer,
    axis: AxisInt = 0,
    allow_fill: bool = False,
    fill_value=None,
):
    """
    Take elements from an array.

    Parameters
    ----------
    arr : array-like or scalar value
        Non array-likes (sequences/scalars without a dtype) are coerced
        to an ndarray.

        .. deprecated:: 2.1.0
            Passing an argument other than a numpy.ndarray, ExtensionArray,
            Index, or Series is deprecated.

    indices : sequence of int or one-dimensional np.ndarray of int
        Indices to be taken.
    axis : int, default 0
        The axis over which to select values.
    allow_fill : bool, default False
        How to handle negative values in `indices`.

        * False: negative values in `indices` indicate positional indices
          from the right (the default). This is similar to :func:`numpy.take`.
        * True: negative values in `indices` indicate missing values.
          These values are set to `fill_value`. Any other negative values
          raise a ``ValueError``.

    fill_value : any, optional
        Fill value to use for NA-indices when `allow_fill` is True.
        This may be ``None``, in which case the default NA value for
        the type (``self.dtype.na_value``) is used.

        For multi-dimensional `arr`, each *element* is filled with
        `fill_value`.

    Returns
    -------
    ndarray or ExtensionArray
        Same type as the input.

    Raises
    ------
    IndexError
        When `indices` is out of bounds for the array.
    ValueError
        When the indexer contains negative values other than ``-1``
        and `allow_fill` is True.

    Notes
    -----
    When `allow_fill` is False, `indices` may be whatever dimensionality
    is accepted by NumPy for `arr`.

    When `allow_fill` is True, `indices` should be 1-D.

    See Also
    --------
    numpy.take : Take elements from an array along an axis.

    Examples
    --------
    >>> import pandas as pd

    With the default ``allow_fill=False``, negative numbers indicate
    positional indices from the right.

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1])
    array([10, 10, 30])

    Setting ``allow_fill=True`` will place `fill_value` in those positions.

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
    array([10., 10., nan])

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
    ...                        fill_value=-10)
    array([ 10,  10, -10])
    """
    if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):
        # GH#52981
        warnings.warn(
            "pd.api.extensions.take accepting non-standard inputs is deprecated "
            "and will raise in a future version. Pass either a numpy.ndarray, "
            "ExtensionArray, Index, or Series instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    if not is_array_like(arr):
        arr = np.asarray(arr)

    indices = ensure_platform_int(indices)

    if not allow_fill:
        # NumPy semantics: negative entries count from the right end.
        return arr.take(indices, axis=axis)

    # pandas semantics: -1 marks a missing position, filled with fill_value.
    validate_indices(indices, arr.shape[axis])
    return take_nd(arr, indices, axis=axis, allow_fill=True, fill_value=fill_value)
|
| 1241 |
+
|
| 1242 |
+
|
| 1243 |
+
# ------------ #
|
| 1244 |
+
# searchsorted #
|
| 1245 |
+
# ------------ #
|
| 1246 |
+
|
| 1247 |
+
|
| 1248 |
+
def searchsorted(
    arr: ArrayLike,
    value: NumpyValueArrayLike | ExtensionArray,
    side: Literal["left", "right"] = "left",
    sorter: NumpySorter | None = None,
) -> npt.NDArray[np.intp] | np.intp:
    """
    Find indices where elements should be inserted to maintain order.

    Find the indices into a sorted array `arr` (a) such that, if the
    corresponding elements in `value` were inserted before the indices,
    the order of `arr` would be preserved.

    Assuming that `arr` is sorted:

    ======  ================================
    `side`  returned index `i` satisfies
    ======  ================================
    left    ``arr[i-1] < value <= self[i]``
    right   ``arr[i-1] <= value < self[i]``
    ======  ================================

    Parameters
    ----------
    arr : np.ndarray, ExtensionArray, Series
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        that sort it.
    value : array-like or scalar
        Values to insert into `arr`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index.  If there is no suitable
        index, return either 0 or N (where N is the length of `self`).
    sorter : 1-D array-like, optional
        Optional array of integer indices that sort array a into ascending
        order. They are typically the result of argsort.

    Returns
    -------
    array of ints or int
        If value is array-like, array of insertion points.
        If value is scalar, a single integer.

    See Also
    --------
    numpy.searchsorted : Similar method from NumPy.
    """
    if sorter is not None:
        sorter = ensure_platform_int(sorter)

    if (
        isinstance(arr, np.ndarray)
        and arr.dtype.kind in "iu"
        and (is_integer(value) or is_integer_dtype(value))
    ):
        # if `arr` and `value` have different dtypes, `arr` would be
        # recast by numpy, causing a slow search.
        # Before searching below, we therefore try to give `value` the
        # same dtype as `arr`, while guarding against integer overflows.
        iinfo = np.iinfo(arr.dtype.type)
        value_arr = np.array([value]) if is_integer(value) else np.array(value)
        if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
            # value within bounds, so no overflow, so can convert value dtype
            # to dtype of arr
            dtype = arr.dtype
        else:
            # out-of-range value: keep its own (wider) dtype and let numpy
            # upcast during the search instead of overflowing.
            dtype = value_arr.dtype

        if is_integer(value):
            # We know that value is int
            value = cast(int, dtype.type(value))
        else:
            value = pd_array(cast(ArrayLike, value), dtype=dtype)
    else:
        # E.g. if `arr` is an array with dtype='datetime64[ns]'
        # and `value` is a pd.Timestamp, we may need to convert value
        arr = ensure_wrapped_if_datetimelike(arr)

    # Argument 1 to "searchsorted" of "ndarray" has incompatible type
    # "Union[NumpyValueArrayLike, ExtensionArray]"; expected "NumpyValueArrayLike"
    return arr.searchsorted(value, side=side, sorter=sorter)  # type: ignore[arg-type]
|
| 1330 |
+
|
| 1331 |
+
|
| 1332 |
+
# ---- #
# diff #
# ---- #

# dtypes for which algos.diff_2d has a specialized cython implementation
_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}


def diff(arr, n: int, axis: AxisInt = 0):
    """
    difference of n between self,
    analogous to s-s.shift(n)

    Parameters
    ----------
    arr : ndarray or ExtensionArray
    n : int
        number of periods
    axis : {0, 1}
        axis to shift on

    Returns
    -------
    shifted : ndarray or ExtensionArray
        Same length/shape as ``arr``; leading (or trailing, for negative
        ``n``) positions hold the NA value for the result dtype.
    """

    n = int(n)
    na = np.nan
    dtype = arr.dtype

    # Boolean diff is XOR (True where the value changed), not subtraction.
    is_bool = is_bool_dtype(dtype)
    if is_bool:
        op = operator.xor
    else:
        op = operator.sub

    if isinstance(dtype, NumpyEADtype):
        # NumpyExtensionArray cannot necessarily hold shifted versions of itself.
        arr = arr.to_numpy()
        dtype = arr.dtype

    if not isinstance(arr, np.ndarray):
        # i.e ExtensionArray
        if hasattr(arr, f"__{op.__name__}__"):
            if axis != 0:
                raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
            # Delegate to the EA's own arithmetic/shift implementation.
            return op(arr, arr.shift(n))
        else:
            raise TypeError(
                f"{type(arr).__name__} has no 'diff' method. "
                "Convert to a suitable dtype prior to calling 'diff'."
            )

    is_timedelta = False
    if arr.dtype.kind in "mM":
        # datetime64/timedelta64: operate on the i8 view, NA is iNaT.
        dtype = np.int64
        arr = arr.view("i8")
        na = iNaT
        is_timedelta = True

    elif is_bool:
        # We have to cast in order to be able to hold np.nan
        dtype = np.object_

    elif dtype.kind in "iu":
        # We have to cast in order to be able to hold np.nan

        # int8, int16 are incompatible with float64,
        # see https://github.com/cython/cython/issues/2646
        if arr.dtype.name in ["int8", "int16"]:
            dtype = np.float32
        else:
            dtype = np.float64

    orig_ndim = arr.ndim
    if orig_ndim == 1:
        # reshape so we can always use algos.diff_2d
        arr = arr.reshape(-1, 1)
        # TODO: require axis == 0

    dtype = np.dtype(dtype)
    out_arr = np.empty(arr.shape, dtype=dtype)

    # Positions with no lagged counterpart get the NA value.
    na_indexer = [slice(None)] * 2
    na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
    out_arr[tuple(na_indexer)] = na

    if arr.dtype.name in _diff_special:
        # TODO: can diff_2d dtype specialization troubles be fixed by defining
        # out_arr inside diff_2d?
        algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta)
    else:
        # To keep mypy happy, _res_indexer is a list while res_indexer is
        # a tuple, ditto for lag_indexer.
        _res_indexer = [slice(None)] * 2
        _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
        res_indexer = tuple(_res_indexer)

        _lag_indexer = [slice(None)] * 2
        _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
        lag_indexer = tuple(_lag_indexer)

        out_arr[res_indexer] = op(arr[res_indexer], arr[lag_indexer])

    if is_timedelta:
        # datetime - datetime (and timedelta - timedelta) both yield timedelta.
        out_arr = out_arr.view("timedelta64[ns]")

    if orig_ndim == 1:
        out_arr = out_arr[:, 0]
    return out_arr
|
| 1443 |
+
|
| 1444 |
+
|
| 1445 |
+
# --------------------------------------------------------------------
|
| 1446 |
+
# Helper functions
|
| 1447 |
+
|
| 1448 |
+
|
| 1449 |
+
# Note: safe_sort is in algorithms.py instead of sorting.py because it is
# low-dependency, is used in this module, and used private methods from
# this module.
def safe_sort(
    values: Index | ArrayLike,
    codes: npt.NDArray[np.intp] | None = None,
    use_na_sentinel: bool = True,
    assume_unique: bool = False,
    verify: bool = True,
) -> AnyArrayLike | tuple[AnyArrayLike, np.ndarray]:
    """
    Sort ``values`` and reorder corresponding ``codes``.

    ``values`` should be unique if ``codes`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.

    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``codes`` is not None.
    codes : np.ndarray[intp] or None, default None
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``-1``.
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``codes`` is None.
    verify : bool, default True
        Check if codes are out of bound for the values and put out of bound
        codes equal to ``-1``. If ``verify=False``, it is assumed there
        are no out of bound codes. Ignored when ``codes`` is None.

    Returns
    -------
    ordered : AnyArrayLike
        Sorted ``values``
    new_codes : ndarray
        Reordered ``codes``; returned when ``codes`` is not None.

    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``codes`` is neither None
        nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``codes`` is not None and ``values`` contain duplicates.
    """
    if not isinstance(values, (np.ndarray, ABCExtensionArray, ABCIndex)):
        raise TypeError(
            "Only np.ndarray, ExtensionArray, and Index objects are allowed to "
            "be passed to safe_sort as values"
        )

    sorter = None
    ordered: AnyArrayLike

    if (
        not isinstance(values.dtype, ExtensionDtype)
        and lib.infer_dtype(values, skipna=False) == "mixed-integer"
    ):
        # ints mixed with strings: argsort would raise, use the custom order
        # (numbers before strings before nulls).
        ordered = _sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except (TypeError, decimal.InvalidOperation):
            # Previous sorters failed or were not applicable, try `_sort_mixed`
            # which would work, but which fails for special case of 1d arrays
            # with tuples.
            if values.size and isinstance(values[0], tuple):
                # error: Argument 1 to "_sort_tuples" has incompatible type
                # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
                # "ndarray[Any, Any]"
                ordered = _sort_tuples(values)  # type: ignore[arg-type]
            else:
                ordered = _sort_mixed(values)

    # codes:

    if codes is None:
        return ordered

    if not is_list_like(codes):
        raise TypeError(
            "Only list-like objects or None are allowed to "
            "be passed to safe_sort as codes"
        )
    codes = ensure_platform_int(np.asarray(codes))

    if not assume_unique and not len(unique(values)) == len(values):
        raise ValueError("values should be unique if codes is not None")

    if sorter is None:
        # mixed types
        # error: Argument 1 to "_get_hashtable_algo" has incompatible type
        # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
        # "ndarray[Any, Any]"
        # Recover the sorter by looking up each ordered value's original
        # position via a hashtable.
        hash_klass, values = _get_hashtable_algo(values)  # type: ignore[arg-type]
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))

    if use_na_sentinel:
        # take_nd is faster, but only works for na_sentinels of -1
        order2 = sorter.argsort()
        if verify:
            # Clamp out-of-bounds codes to a valid position; they are
            # re-masked to -1 below via `mask`.
            mask = (codes < -len(values)) | (codes >= len(values))
            codes[mask] = 0
        else:
            mask = None
        new_codes = take_nd(order2, codes, fill_value=-1)
    else:
        reverse_indexer = np.empty(len(sorter), dtype=int)
        reverse_indexer.put(sorter, np.arange(len(sorter)))
        # Out of bound indices will be masked with `-1` next, so we
        # may deal with them here without performance loss using `mode='wrap'`
        new_codes = reverse_indexer.take(codes, mode="wrap")

        if use_na_sentinel:
            # NOTE(review): unreachable — use_na_sentinel is False in this
            # branch; kept byte-identical to the original.
            mask = codes == -1
            if verify:
                mask = mask | (codes < -len(values)) | (codes >= len(values))

    if use_na_sentinel and mask is not None:
        np.putmask(new_codes, mask, -1)

    return ordered, ensure_platform_int(new_codes)
|
| 1580 |
+
|
| 1581 |
+
|
| 1582 |
+
def _sort_mixed(values) -> AnyArrayLike:
|
| 1583 |
+
"""order ints before strings before nulls in 1d arrays"""
|
| 1584 |
+
str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
|
| 1585 |
+
null_pos = np.array([isna(x) for x in values], dtype=bool)
|
| 1586 |
+
num_pos = ~str_pos & ~null_pos
|
| 1587 |
+
str_argsort = np.argsort(values[str_pos])
|
| 1588 |
+
num_argsort = np.argsort(values[num_pos])
|
| 1589 |
+
# convert boolean arrays to positional indices, then order by underlying values
|
| 1590 |
+
str_locs = str_pos.nonzero()[0].take(str_argsort)
|
| 1591 |
+
num_locs = num_pos.nonzero()[0].take(num_argsort)
|
| 1592 |
+
null_locs = null_pos.nonzero()[0]
|
| 1593 |
+
locs = np.concatenate([num_locs, str_locs, null_locs])
|
| 1594 |
+
return values.take(locs)
|
| 1595 |
+
|
| 1596 |
+
|
| 1597 |
+
def _sort_tuples(values: np.ndarray) -> np.ndarray:
|
| 1598 |
+
"""
|
| 1599 |
+
Convert array of tuples (1d) to array of arrays (2d).
|
| 1600 |
+
We need to keep the columns separately as they contain different types and
|
| 1601 |
+
nans (can't use `np.sort` as it may fail when str and nan are mixed in a
|
| 1602 |
+
column as types cannot be compared).
|
| 1603 |
+
"""
|
| 1604 |
+
from pandas.core.internals.construction import to_arrays
|
| 1605 |
+
from pandas.core.sorting import lexsort_indexer
|
| 1606 |
+
|
| 1607 |
+
arrays, _ = to_arrays(values, None)
|
| 1608 |
+
indexer = lexsort_indexer(arrays, orders=True)
|
| 1609 |
+
return values[indexer]
|
| 1610 |
+
|
| 1611 |
+
|
| 1612 |
+
def union_with_duplicates(
    lvals: ArrayLike | Index, rvals: ArrayLike | Index
) -> ArrayLike | Index:
    """
    Extracts the union from lvals and rvals with respect to duplicates and nans in
    both arrays.

    Each distinct value appears ``max(count_in_lvals, count_in_rvals)`` times,
    in first-seen order (lvals first).

    Parameters
    ----------
    lvals : np.ndarray or ExtensionArray
        left values which is ordered in front.
    rvals : np.ndarray or ExtensionArray
        right values ordered after lvals.

    Returns
    -------
    np.ndarray or ExtensionArray
        Containing the unsorted union of both arrays.

    Notes
    -----
    Caller is responsible for ensuring lvals.dtype == rvals.dtype.
    """
    from pandas import Series

    with warnings.catch_warnings():
        # filter warning from object dtype inference; we will end up discarding
        # the index here, so the deprecation does not affect the end result here.
        warnings.filterwarnings(
            "ignore",
            "The behavior of value_counts with object-dtype is deprecated",
            category=FutureWarning,
        )
        l_count = value_counts_internal(lvals, dropna=False)
        r_count = value_counts_internal(rvals, dropna=False)
    # Align on the union of keys; a key missing from one side counts as 0.
    l_count, r_count = l_count.align(r_count, fill_value=0)
    final_count = np.maximum(l_count.values, r_count.values)
    final_count = Series(final_count, index=l_count.index, dtype="int", copy=False)
    if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex):
        unique_vals = lvals.append(rvals).unique()
    else:
        if isinstance(lvals, ABCIndex):
            lvals = lvals._values
        if isinstance(rvals, ABCIndex):
            rvals = rvals._values
        # error: List item 0 has incompatible type "Union[ExtensionArray,
        # ndarray[Any, Any], Index]"; expected "Union[ExtensionArray,
        # ndarray[Any, Any]]"
        combined = concat_compat([lvals, rvals])  # type: ignore[list-item]
        unique_vals = unique(combined)
        unique_vals = ensure_wrapped_if_datetimelike(unique_vals)
    # Repeat each unique value to its maximal per-side multiplicity.
    repeats = final_count.reindex(unique_vals).values
    return np.repeat(unique_vals, repeats)
|
| 1665 |
+
|
| 1666 |
+
|
| 1667 |
+
def map_array(
    arr: ArrayLike,
    mapper,
    na_action: Literal["ignore"] | None = None,
    convert: bool = True,
) -> np.ndarray | ExtensionArray | Index:
    """
    Map values using an input mapping or function.

    Parameters
    ----------
    arr : np.ndarray or ExtensionArray
        Values to map.
    mapper : function, dict, or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NA values, without passing them to the
        mapping correspondence.
    convert : bool, default True
        Try to find better dtype for elementwise function results. If
        False, leave as dtype=object.

    Returns
    -------
    Union[ndarray, Index, ExtensionArray]
        The output of the mapping function applied to the array.
        If the function returns a tuple with more than one element
        a MultiIndex will be returned.
    """
    if na_action not in (None, "ignore"):
        msg = f"na_action must either be 'ignore' or None, {na_action} was passed"
        raise ValueError(msg)

    # we can fastpath dict/Series to an efficient map
    # as we know that we are not going to have to yield
    # python types
    if is_dict_like(mapper):
        if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
            # If a dictionary subclass defines a default value method,
            # convert mapper to a lookup function (GH #15999).
            dict_with_default = mapper
            mapper = lambda x: dict_with_default[
                np.nan if isinstance(x, float) and np.isnan(x) else x
            ]
        else:
            # Dictionary does not have a default. Thus it's safe to
            # convert to an Series for efficiency.
            # we specify the keys here to handle the
            # possibility that they are tuples

            # The return value of mapping with an empty mapper is
            # expected to be pd.Series(np.nan, ...). As np.nan is
            # of dtype float64 the return value of this method should
            # be float64 as well
            from pandas import Series

            if len(mapper) == 0:
                mapper = Series(mapper, dtype=np.float64)
            else:
                mapper = Series(mapper)

    if isinstance(mapper, ABCSeries):
        if na_action == "ignore":
            # Drop NA keys so NA inputs miss the lookup and map to NA.
            mapper = mapper[mapper.index.notna()]

        # Since values were input this means we came from either
        # a dict or a series and mapper should be an index
        indexer = mapper.index.get_indexer(arr)
        new_values = take_nd(mapper._values, indexer)

        return new_values

    if not len(arr):
        return arr.copy()

    # we must convert to python types
    values = arr.astype(object, copy=False)
    if na_action is None:
        return lib.map_infer(values, mapper, convert=convert)
    else:
        return lib.map_infer_mask(
            values, mapper, mask=isna(values).view(np.uint8), convert=convert
        )
|
videollama2/lib/python3.10/site-packages/pandas/core/api.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pandas._libs import (
|
| 2 |
+
NaT,
|
| 3 |
+
Period,
|
| 4 |
+
Timedelta,
|
| 5 |
+
Timestamp,
|
| 6 |
+
)
|
| 7 |
+
from pandas._libs.missing import NA
|
| 8 |
+
|
| 9 |
+
from pandas.core.dtypes.dtypes import (
|
| 10 |
+
ArrowDtype,
|
| 11 |
+
CategoricalDtype,
|
| 12 |
+
DatetimeTZDtype,
|
| 13 |
+
IntervalDtype,
|
| 14 |
+
PeriodDtype,
|
| 15 |
+
)
|
| 16 |
+
from pandas.core.dtypes.missing import (
|
| 17 |
+
isna,
|
| 18 |
+
isnull,
|
| 19 |
+
notna,
|
| 20 |
+
notnull,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
from pandas.core.algorithms import (
|
| 24 |
+
factorize,
|
| 25 |
+
unique,
|
| 26 |
+
value_counts,
|
| 27 |
+
)
|
| 28 |
+
from pandas.core.arrays import Categorical
|
| 29 |
+
from pandas.core.arrays.boolean import BooleanDtype
|
| 30 |
+
from pandas.core.arrays.floating import (
|
| 31 |
+
Float32Dtype,
|
| 32 |
+
Float64Dtype,
|
| 33 |
+
)
|
| 34 |
+
from pandas.core.arrays.integer import (
|
| 35 |
+
Int8Dtype,
|
| 36 |
+
Int16Dtype,
|
| 37 |
+
Int32Dtype,
|
| 38 |
+
Int64Dtype,
|
| 39 |
+
UInt8Dtype,
|
| 40 |
+
UInt16Dtype,
|
| 41 |
+
UInt32Dtype,
|
| 42 |
+
UInt64Dtype,
|
| 43 |
+
)
|
| 44 |
+
from pandas.core.arrays.string_ import StringDtype
|
| 45 |
+
from pandas.core.construction import array
|
| 46 |
+
from pandas.core.flags import Flags
|
| 47 |
+
from pandas.core.groupby import (
|
| 48 |
+
Grouper,
|
| 49 |
+
NamedAgg,
|
| 50 |
+
)
|
| 51 |
+
from pandas.core.indexes.api import (
|
| 52 |
+
CategoricalIndex,
|
| 53 |
+
DatetimeIndex,
|
| 54 |
+
Index,
|
| 55 |
+
IntervalIndex,
|
| 56 |
+
MultiIndex,
|
| 57 |
+
PeriodIndex,
|
| 58 |
+
RangeIndex,
|
| 59 |
+
TimedeltaIndex,
|
| 60 |
+
)
|
| 61 |
+
from pandas.core.indexes.datetimes import (
|
| 62 |
+
bdate_range,
|
| 63 |
+
date_range,
|
| 64 |
+
)
|
| 65 |
+
from pandas.core.indexes.interval import (
|
| 66 |
+
Interval,
|
| 67 |
+
interval_range,
|
| 68 |
+
)
|
| 69 |
+
from pandas.core.indexes.period import period_range
|
| 70 |
+
from pandas.core.indexes.timedeltas import timedelta_range
|
| 71 |
+
from pandas.core.indexing import IndexSlice
|
| 72 |
+
from pandas.core.series import Series
|
| 73 |
+
from pandas.core.tools.datetimes import to_datetime
|
| 74 |
+
from pandas.core.tools.numeric import to_numeric
|
| 75 |
+
from pandas.core.tools.timedeltas import to_timedelta
|
| 76 |
+
|
| 77 |
+
from pandas.io.formats.format import set_eng_float_format
|
| 78 |
+
from pandas.tseries.offsets import DateOffset
|
| 79 |
+
|
| 80 |
+
# DataFrame needs to be imported after NamedAgg to avoid a circular import
|
| 81 |
+
from pandas.core.frame import DataFrame # isort:skip
|
| 82 |
+
|
| 83 |
+
__all__ = [
|
| 84 |
+
"array",
|
| 85 |
+
"ArrowDtype",
|
| 86 |
+
"bdate_range",
|
| 87 |
+
"BooleanDtype",
|
| 88 |
+
"Categorical",
|
| 89 |
+
"CategoricalDtype",
|
| 90 |
+
"CategoricalIndex",
|
| 91 |
+
"DataFrame",
|
| 92 |
+
"DateOffset",
|
| 93 |
+
"date_range",
|
| 94 |
+
"DatetimeIndex",
|
| 95 |
+
"DatetimeTZDtype",
|
| 96 |
+
"factorize",
|
| 97 |
+
"Flags",
|
| 98 |
+
"Float32Dtype",
|
| 99 |
+
"Float64Dtype",
|
| 100 |
+
"Grouper",
|
| 101 |
+
"Index",
|
| 102 |
+
"IndexSlice",
|
| 103 |
+
"Int16Dtype",
|
| 104 |
+
"Int32Dtype",
|
| 105 |
+
"Int64Dtype",
|
| 106 |
+
"Int8Dtype",
|
| 107 |
+
"Interval",
|
| 108 |
+
"IntervalDtype",
|
| 109 |
+
"IntervalIndex",
|
| 110 |
+
"interval_range",
|
| 111 |
+
"isna",
|
| 112 |
+
"isnull",
|
| 113 |
+
"MultiIndex",
|
| 114 |
+
"NA",
|
| 115 |
+
"NamedAgg",
|
| 116 |
+
"NaT",
|
| 117 |
+
"notna",
|
| 118 |
+
"notnull",
|
| 119 |
+
"Period",
|
| 120 |
+
"PeriodDtype",
|
| 121 |
+
"PeriodIndex",
|
| 122 |
+
"period_range",
|
| 123 |
+
"RangeIndex",
|
| 124 |
+
"Series",
|
| 125 |
+
"set_eng_float_format",
|
| 126 |
+
"StringDtype",
|
| 127 |
+
"Timedelta",
|
| 128 |
+
"TimedeltaIndex",
|
| 129 |
+
"timedelta_range",
|
| 130 |
+
"Timestamp",
|
| 131 |
+
"to_datetime",
|
| 132 |
+
"to_numeric",
|
| 133 |
+
"to_timedelta",
|
| 134 |
+
"UInt16Dtype",
|
| 135 |
+
"UInt32Dtype",
|
| 136 |
+
"UInt64Dtype",
|
| 137 |
+
"UInt8Dtype",
|
| 138 |
+
"unique",
|
| 139 |
+
"value_counts",
|
| 140 |
+
]
|
videollama2/lib/python3.10/site-packages/pandas/core/generic.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/indexing.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/missing.py
ADDED
|
@@ -0,0 +1,1158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Routines for filling missing data.
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from functools import wraps
|
| 7 |
+
from typing import (
|
| 8 |
+
TYPE_CHECKING,
|
| 9 |
+
Any,
|
| 10 |
+
Literal,
|
| 11 |
+
cast,
|
| 12 |
+
overload,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from pandas._libs import (
|
| 18 |
+
NaT,
|
| 19 |
+
algos,
|
| 20 |
+
lib,
|
| 21 |
+
)
|
| 22 |
+
from pandas._typing import (
|
| 23 |
+
ArrayLike,
|
| 24 |
+
AxisInt,
|
| 25 |
+
F,
|
| 26 |
+
ReindexMethod,
|
| 27 |
+
npt,
|
| 28 |
+
)
|
| 29 |
+
from pandas.compat._optional import import_optional_dependency
|
| 30 |
+
|
| 31 |
+
from pandas.core.dtypes.cast import infer_dtype_from
|
| 32 |
+
from pandas.core.dtypes.common import (
|
| 33 |
+
is_array_like,
|
| 34 |
+
is_bool_dtype,
|
| 35 |
+
is_numeric_dtype,
|
| 36 |
+
is_numeric_v_string_like,
|
| 37 |
+
is_object_dtype,
|
| 38 |
+
needs_i8_conversion,
|
| 39 |
+
)
|
| 40 |
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
|
| 41 |
+
from pandas.core.dtypes.missing import (
|
| 42 |
+
is_valid_na_for_dtype,
|
| 43 |
+
isna,
|
| 44 |
+
na_value_for_dtype,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
if TYPE_CHECKING:
|
| 48 |
+
from pandas import Index
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def check_value_size(value, mask: npt.NDArray[np.bool_], length: int):
    """
    Validate the size of the values passed to ExtensionArray.fillna.

    Scalars pass straight through; array-likes must match ``length`` and
    are reduced to the entries selected by ``mask``.

    Raises
    ------
    ValueError
        If ``value`` is array-like and its length differs from ``length``.
    """
    if not is_array_like(value):
        # Scalar fill values need no size validation.
        return value

    if len(value) != length:
        raise ValueError(
            f"Length of 'value' does not match. Got ({len(value)}) "
            f" expected {length}"
        )
    # Keep only the fill values that line up with masked (missing) positions.
    return value[mask]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
    """
    Return a masking array of same size/shape as arr
    with entries equaling any member of values_to_mask set to True

    Parameters
    ----------
    arr : ArrayLike
    values_to_mask: list, tuple, or scalar

    Returns
    -------
    np.ndarray[bool]
    """
    # When called from Block.replace/replace_list, values_to_mask is a scalar
    # known to be holdable by arr.
    # When called from Series._single_replace, values_to_mask is tuple or list
    dtype, values_to_mask = infer_dtype_from(values_to_mask)

    if isinstance(dtype, np.dtype):
        values_to_mask = np.array(values_to_mask, dtype=dtype)
    else:
        # Extension dtype: build the values through the dtype's own array
        # type so the element-wise comparisons below use EA semantics.
        cls = dtype.construct_array_type()
        if not lib.is_list_like(values_to_mask):
            values_to_mask = [values_to_mask]
        values_to_mask = cls._from_sequence(values_to_mask, dtype=dtype, copy=False)

    potential_na = False
    if is_object_dtype(arr.dtype):
        # pre-compute mask to avoid comparison to NA
        potential_na = True
        arr_mask = ~isna(arr)

    # Split the targets into NA-like values (handled at the end via isna)
    # and concrete values (compared element-wise below).
    na_mask = isna(values_to_mask)
    nonna = values_to_mask[~na_mask]

    # GH 21977
    mask = np.zeros(arr.shape, dtype=bool)
    if (
        is_numeric_dtype(arr.dtype)
        and not is_bool_dtype(arr.dtype)
        and is_bool_dtype(nonna.dtype)
    ):
        # numeric array vs bool targets: skip comparison entirely,
        # leaving the mask all-False (see GH 21977 above).
        pass
    elif (
        is_bool_dtype(arr.dtype)
        and is_numeric_dtype(nonna.dtype)
        and not is_bool_dtype(nonna.dtype)
    ):
        # bool array vs (non-bool) numeric targets: likewise skipped.
        pass
    else:
        for x in nonna:
            if is_numeric_v_string_like(arr, x):
                # GH#29553 prevent numpy deprecation warnings
                pass
            else:
                if potential_na:
                    # Compare only the non-NA entries of an object array;
                    # NA positions stay False in new_mask.
                    new_mask = np.zeros(arr.shape, dtype=np.bool_)
                    new_mask[arr_mask] = arr[arr_mask] == x
                else:
                    new_mask = arr == x

                if not isinstance(new_mask, np.ndarray):
                    # usually BooleanArray
                    new_mask = new_mask.to_numpy(dtype=bool, na_value=False)
                mask |= new_mask

    if na_mask.any():
        # Any NA-like member of values_to_mask matches every NA in arr.
        mask |= isna(arr)

    return mask
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@overload
def clean_fill_method(
    method: Literal["ffill", "pad", "bfill", "backfill"],
    *,
    allow_nearest: Literal[False] = ...,
) -> Literal["pad", "backfill"]:
    ...


@overload
def clean_fill_method(
    method: Literal["ffill", "pad", "bfill", "backfill", "nearest"],
    *,
    allow_nearest: Literal[True],
) -> Literal["pad", "backfill", "nearest"]:
    ...


def clean_fill_method(
    method: Literal["ffill", "pad", "bfill", "backfill", "nearest"],
    *,
    allow_nearest: bool = False,
) -> Literal["pad", "backfill", "nearest"]:
    """
    Normalize a fill-method alias to its canonical name.

    'ffill' maps to 'pad' and 'bfill' to 'backfill', case-insensitively.
    'nearest' is accepted only when ``allow_nearest`` is True.

    Raises
    ------
    ValueError
        If the normalized method is not one of the accepted names.
    """
    if isinstance(method, str):
        # Accept any casing, then collapse the ffill/bfill aliases.
        canonical = method.lower()
        canonical = {"ffill": "pad", "bfill": "backfill"}.get(canonical, canonical)
        # error: Incompatible types in assignment (expression has type "str", variable
        # has type "Literal['ffill', 'pad', 'bfill', 'backfill', 'nearest']")
        method = canonical  # type: ignore[assignment]

    accepted = ["pad", "backfill"]
    expecting = "pad (ffill) or backfill (bfill)"
    if allow_nearest:
        accepted.append("nearest")
        expecting = "pad (ffill), backfill (bfill) or nearest"
    if method not in accepted:
        raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
    return method
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# Interpolation methods handled natively by np.interp.
NP_METHODS = ["linear", "time", "index", "values"]

# Interpolation methods dispatched to _interpolate_scipy_wrapper.
SP_METHODS = [
    "nearest",
    "zero",
    "slinear",
    "quadratic",
    "cubic",
    "barycentric",
    "krogh",
    "spline",
    "polynomial",
    "from_derivatives",
    "piecewise_polynomial",
    "pchip",
    "akima",
    "cubicspline",
]


def clean_interp_method(method: str, index: Index, **kwargs) -> str:
    """
    Validate an interpolation method (and its kwargs) against an index.

    Returns the method unchanged if it is acceptable.

    Raises
    ------
    ValueError
        If the method is unknown, if 'spline'/'polynomial' lack an order,
        or if a monotonicity-requiring method gets a non-monotonic index.
    """
    if method in ("spline", "polynomial") and kwargs.get("order") is None:
        raise ValueError("You must specify the order of the spline or polynomial.")

    valid = NP_METHODS + SP_METHODS
    if method not in valid:
        raise ValueError(f"method must be one of {valid}. Got '{method}' instead.")

    if method in ("krogh", "piecewise_polynomial", "pchip"):
        # These scipy interpolators assume strictly ordered x-values.
        if not index.is_monotonic_increasing:
            raise ValueError(
                f"{method} interpolation requires that the index be monotonic."
            )

    return method
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None:
    """
    Retrieves the positional index of the first valid value.

    Parameters
    ----------
    how : {'first', 'last'}
        Use this parameter to change between the first or last valid index.
    is_valid: np.ndarray
        Mask to find na_values.

    Returns
    -------
    int or None
    """
    assert how in ["first", "last"]

    if len(is_valid) == 0:
        # Nothing to scan.
        return None

    if is_valid.ndim == 2:
        # Collapse 2-d masks: a row counts as valid if any column is valid.
        is_valid = is_valid.any(axis=1)

    if how == "first":
        pos = is_valid.argmax()
    else:
        # Scan from the end, then translate back to a forward position.
        pos = len(is_valid) - 1 - is_valid[::-1].argmax()

    if not is_valid[pos]:
        # argmax returned 0 only because no True entry exists at all.
        return None
    # Incompatible return value type (got "signedinteger[Any]",
    # expected "Optional[int]")
    return pos  # type: ignore[return-value]
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def validate_limit_direction(
    limit_direction: str,
) -> Literal["forward", "backward", "both"]:
    """
    Lowercase ``limit_direction`` and ensure it is one of
    'forward', 'backward' or 'both'.
    """
    allowed = ["forward", "backward", "both"]
    normalized = limit_direction.lower()
    if normalized not in allowed:
        raise ValueError(
            "Invalid limit_direction: expecting one of "
            f"{allowed}, got '{normalized}'."
        )
    # error: Incompatible return value type (got "str", expected
    # "Literal['forward', 'backward', 'both']")
    return normalized  # type: ignore[return-value]
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def validate_limit_area(limit_area: str | None) -> Literal["inside", "outside"] | None:
    """
    Lowercase and validate ``limit_area``; ``None`` passes through unchanged.
    """
    if limit_area is None:
        return None

    allowed = ["inside", "outside"]
    normalized = limit_area.lower()
    if normalized not in allowed:
        raise ValueError(
            f"Invalid limit_area: expecting one of {allowed}, got "
            f"{normalized}."
        )
    # error: Incompatible return value type (got "Optional[str]", expected
    # "Optional[Literal['inside', 'outside']]")
    return normalized  # type: ignore[return-value]
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def infer_limit_direction(
    limit_direction: Literal["backward", "forward", "both"] | None, method: str
) -> Literal["backward", "forward", "both"]:
    """
    Derive or validate the fill direction implied by ``method``.

    When ``limit_direction`` is None it defaults to 'backward' for the
    backfill methods and 'forward' otherwise. An explicit direction that
    contradicts a directional method raises ValueError.
    """
    is_backfill = method in ("backfill", "bfill")
    if limit_direction is None:
        # Directional fill methods imply their own direction.
        return "backward" if is_backfill else "forward"

    if method in ("pad", "ffill") and limit_direction != "forward":
        raise ValueError(
            f"`limit_direction` must be 'forward' for method `{method}`"
        )
    if is_backfill and limit_direction != "backward":
        raise ValueError(
            f"`limit_direction` must be 'backward' for method `{method}`"
        )
    return limit_direction
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def get_interp_index(method, index: Index) -> Index:
    """
    Return the x-axis Index to interpolate against for the given method.

    For 'linear' the original index is replaced with a 0..n-1 positional
    Index; for every other method the existing index is validated and
    returned unchanged.

    Raises
    ------
    ValueError
        If the method needs a numeric/datetime index and the index is not.
    NotImplementedError
        If the index itself contains NaNs.
    """
    # create/use the index
    if method == "linear":
        # prior default
        from pandas import Index

        index = Index(np.arange(len(index)))
    else:
        # These methods explicitly consume the index values, so any dtype
        # is acceptable for them; everything else needs numeric/datetime.
        methods = {"index", "values", "nearest", "time"}
        is_numeric_or_datetime = (
            is_numeric_dtype(index.dtype)
            or isinstance(index.dtype, DatetimeTZDtype)
            or lib.is_np_dtype(index.dtype, "mM")
        )
        if method not in methods and not is_numeric_or_datetime:
            raise ValueError(
                "Index column must be numeric or datetime type when "
                f"using {method} method other than linear. "
                "Try setting a numeric or datetime index column before "
                "interpolating."
            )

        if isna(index).any():
            raise NotImplementedError(
                "Interpolation with NaNs in the index "
                "has not been implemented. Try filling "
                "those NaNs before interpolating."
            )
    return index
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def interpolate_2d_inplace(
    data: np.ndarray,  # floating dtype
    index: Index,
    axis: AxisInt,
    method: str = "linear",
    limit: int | None = None,
    limit_direction: str = "forward",
    limit_area: str | None = None,
    fill_value: Any | None = None,
    mask=None,
    **kwargs,
) -> None:
    """
    Column-wise application of _interpolate_1d.

    Validates the method/limit arguments once, converts the index to
    interpolation x-values once, then applies _interpolate_1d to each
    1-d slice of ``data`` along ``axis``.

    Notes
    -----
    Alters 'data' in-place.

    The signature does differ from _interpolate_1d because it only
    includes what is needed for Block.interpolate.
    """
    # validate the interp method
    clean_interp_method(method, index, **kwargs)

    if is_valid_na_for_dtype(fill_value, data.dtype):
        # Normalize NA-like fill values to the canonical NA for this dtype.
        fill_value = na_value_for_dtype(data.dtype, compat=False)

    if method == "time":
        if not needs_i8_conversion(index.dtype):
            raise ValueError(
                "time-weighted interpolation only works "
                "on Series or DataFrames with a "
                "DatetimeIndex"
            )
        # Once the datetime index is validated, "time" reduces to "values".
        method = "values"

    limit_direction = validate_limit_direction(limit_direction)
    limit_area_validated = validate_limit_area(limit_area)

    # default limit is unlimited GH #16282
    limit = algos.validate_limit(nobs=None, limit=limit)

    indices = _index_to_interp_indices(index, method)

    def func(yvalues: np.ndarray) -> None:
        # process 1-d slices in the axis direction

        _interpolate_1d(
            indices=indices,
            yvalues=yvalues,
            method=method,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area_validated,
            fill_value=fill_value,
            bounds_error=False,
            mask=mask,
            **kwargs,
        )

    # error: Argument 1 to "apply_along_axis" has incompatible type
    # "Callable[[ndarray[Any, Any]], None]"; expected "Callable[...,
    # Union[_SupportsArray[dtype[<nothing>]], Sequence[_SupportsArray
    # [dtype[<nothing>]]], Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
    # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
    # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
    np.apply_along_axis(func, axis, data)  # type: ignore[arg-type]
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:
    """
    Convert Index to ndarray of indices to pass to NumPy/SciPy.
    """
    xarr = index._values
    if needs_i8_conversion(xarr.dtype):
        # GH#1646 for dt64tz
        # View datetime-like values as their int64 representation so the
        # interpolation routines operate on plain numbers.
        xarr = xarr.view("i8")

    if method == "linear":
        inds = xarr
        inds = cast(np.ndarray, inds)
    else:
        inds = np.asarray(xarr)

        if method in ("values", "index"):
            if inds.dtype == np.object_:
                # Object-dtype index values: let pandas infer a better
                # numeric dtype where possible.
                inds = lib.maybe_convert_objects(inds)

    return inds
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def _interpolate_1d(
    indices: np.ndarray,
    yvalues: np.ndarray,
    method: str = "linear",
    limit: int | None = None,
    limit_direction: str = "forward",
    limit_area: Literal["inside", "outside"] | None = None,
    fill_value: Any | None = None,
    bounds_error: bool = False,
    order: int | None = None,
    mask=None,
    **kwargs,
) -> None:
    """
    Logic for the 1-d interpolation. The input
    indices and yvalues will each be 1-d arrays of the same length.

    Bounds_error is currently hardcoded to False since non-scipy ones don't
    take it as an argument.

    Notes
    -----
    Fills 'yvalues' in-place.
    """
    # Positions to fill: either the caller-provided mask or the NA mask.
    if mask is not None:
        invalid = mask
    else:
        invalid = isna(yvalues)
    valid = ~invalid

    # All-invalid: nothing to interpolate from; all-valid: nothing to fill.
    if not valid.any():
        return

    if valid.all():
        return

    # These are sets of index pointers to invalid values... i.e. {0, 1, etc...
    all_nans = set(np.flatnonzero(invalid))

    first_valid_index = find_valid_index(how="first", is_valid=valid)
    if first_valid_index is None:  # no nan found in start
        first_valid_index = 0
    start_nans = set(range(first_valid_index))

    last_valid_index = find_valid_index(how="last", is_valid=valid)
    if last_valid_index is None:  # no nan found in end
        last_valid_index = len(yvalues)
    end_nans = set(range(1 + last_valid_index, len(valid)))

    # Like the sets above, preserve_nans contains indices of invalid values,
    # but in this case, it is the final set of indices that need to be
    # preserved as NaN after the interpolation.

    # For example if limit_direction='forward' then preserve_nans will
    # contain indices of NaNs at the beginning of the series, and NaNs that
    # are more than 'limit' away from the prior non-NaN.

    # set preserve_nans based on direction using _interp_limit
    preserve_nans: list | set
    if limit_direction == "forward":
        preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
    elif limit_direction == "backward":
        preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
    else:
        # both directions... just use _interp_limit
        preserve_nans = set(_interp_limit(invalid, limit, limit))

    # if limit_area is set, add either mid or outside indices
    # to preserve_nans GH #16284
    if limit_area == "inside":
        # preserve NaNs on the outside
        preserve_nans |= start_nans | end_nans
    elif limit_area == "outside":
        # preserve NaNs on the inside
        mid_nans = all_nans - start_nans - end_nans
        preserve_nans |= mid_nans

    # sort preserve_nans and convert to list
    preserve_nans = sorted(preserve_nans)

    is_datetimelike = yvalues.dtype.kind in "mM"

    if is_datetimelike:
        # Interpolate datetime-likes on their int64 representation.
        yvalues = yvalues.view("i8")

    if method in NP_METHODS:
        # np.interp requires sorted X values, #21037

        indexer = np.argsort(indices[valid])
        yvalues[invalid] = np.interp(
            indices[invalid], indices[valid][indexer], yvalues[valid][indexer]
        )
    else:
        yvalues[invalid] = _interpolate_scipy_wrapper(
            indices[valid],
            yvalues[valid],
            indices[invalid],
            method=method,
            fill_value=fill_value,
            bounds_error=bounds_error,
            order=order,
            **kwargs,
        )

    # Re-blank the positions that limit/limit_area rules say must stay NA.
    if mask is not None:
        mask[:] = False
        mask[preserve_nans] = True
    elif is_datetimelike:
        yvalues[preserve_nans] = NaT.value
    else:
        yvalues[preserve_nans] = np.nan
    return
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
def _interpolate_scipy_wrapper(
    x: np.ndarray,
    y: np.ndarray,
    new_x: np.ndarray,
    method: str,
    fill_value=None,
    bounds_error: bool = False,
    order=None,
    **kwargs,
):
    """
    Dispatch an interpolation request to the matching scipy routine.

    ``method`` plays the role of scipy's ``kind``.  Returns an array of
    values interpolated at ``new_x``.  Any method added here must also be
    registered in _clean_interp_method.
    """
    extra = f"{method} interpolation requires SciPy."
    import_optional_dependency("scipy", extra=extra)
    from scipy import interpolate

    new_x = np.asarray(new_x)

    # Methods served by dedicated helpers rather than interp1d; these
    # ignore some kwargs that could be passed along.
    alt_methods = {
        "barycentric": interpolate.barycentric_interpolate,
        "krogh": interpolate.krogh_interpolate,
        "from_derivatives": _from_derivatives,
        "piecewise_polynomial": _from_derivatives,
        "cubicspline": _cubicspline_interpolate,
        "akima": _akima_interpolate,
        "pchip": interpolate.pchip_interpolate,
    }

    interp1d_methods = [
        "nearest",
        "zero",
        "slinear",
        "quadratic",
        "cubic",
        "polynomial",
    ]

    if method in interp1d_methods:
        # "polynomial" forwards the user-supplied order as interp1d's kind;
        # every other name *is* the kind.
        kind = order if method == "polynomial" else method
        interpolator = interpolate.interp1d(
            x, y, kind=kind, fill_value=fill_value, bounds_error=bounds_error
        )
        return interpolator(new_x)

    if method == "spline":
        # GH #10633, #24014
        if isna(order) or (order <= 0):
            raise ValueError(
                f"order needs to be specified and greater than 0; got order: {order}"
            )
        interpolator = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
        return interpolator(new_x)

    # GH 7295: need to be able to write for some reason
    # in some circumstances: check all three
    if not x.flags.writeable:
        x = x.copy()
    if not y.flags.writeable:
        y = y.copy()
    if not new_x.flags.writeable:
        new_x = new_x.copy()
    return alt_methods[method](x, y, new_x, **kwargs)
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
def _from_derivatives(
|
| 621 |
+
xi: np.ndarray,
|
| 622 |
+
yi: np.ndarray,
|
| 623 |
+
x: np.ndarray,
|
| 624 |
+
order=None,
|
| 625 |
+
der: int | list[int] | None = 0,
|
| 626 |
+
extrapolate: bool = False,
|
| 627 |
+
):
|
| 628 |
+
"""
|
| 629 |
+
Convenience function for interpolate.BPoly.from_derivatives.
|
| 630 |
+
|
| 631 |
+
Construct a piecewise polynomial in the Bernstein basis, compatible
|
| 632 |
+
with the specified values and derivatives at breakpoints.
|
| 633 |
+
|
| 634 |
+
Parameters
|
| 635 |
+
----------
|
| 636 |
+
xi : array-like
|
| 637 |
+
sorted 1D array of x-coordinates
|
| 638 |
+
yi : array-like or list of array-likes
|
| 639 |
+
yi[i][j] is the j-th derivative known at xi[i]
|
| 640 |
+
order: None or int or array-like of ints. Default: None.
|
| 641 |
+
Specifies the degree of local polynomials. If not None, some
|
| 642 |
+
derivatives are ignored.
|
| 643 |
+
der : int or list
|
| 644 |
+
How many derivatives to extract; None for all potentially nonzero
|
| 645 |
+
derivatives (that is a number equal to the number of points), or a
|
| 646 |
+
list of derivatives to extract. This number includes the function
|
| 647 |
+
value as 0th derivative.
|
| 648 |
+
extrapolate : bool, optional
|
| 649 |
+
Whether to extrapolate to ouf-of-bounds points based on first and last
|
| 650 |
+
intervals, or to return NaNs. Default: True.
|
| 651 |
+
|
| 652 |
+
See Also
|
| 653 |
+
--------
|
| 654 |
+
scipy.interpolate.BPoly.from_derivatives
|
| 655 |
+
|
| 656 |
+
Returns
|
| 657 |
+
-------
|
| 658 |
+
y : scalar or array-like
|
| 659 |
+
The result, of length R or length M or M by R.
|
| 660 |
+
"""
|
| 661 |
+
from scipy import interpolate
|
| 662 |
+
|
| 663 |
+
# return the method for compat with scipy version & backwards compat
|
| 664 |
+
method = interpolate.BPoly.from_derivatives
|
| 665 |
+
m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)
|
| 666 |
+
|
| 667 |
+
return m(x)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
def _akima_interpolate(
|
| 671 |
+
xi: np.ndarray,
|
| 672 |
+
yi: np.ndarray,
|
| 673 |
+
x: np.ndarray,
|
| 674 |
+
der: int | list[int] | None = 0,
|
| 675 |
+
axis: AxisInt = 0,
|
| 676 |
+
):
|
| 677 |
+
"""
|
| 678 |
+
Convenience function for akima interpolation.
|
| 679 |
+
xi and yi are arrays of values used to approximate some function f,
|
| 680 |
+
with ``yi = f(xi)``.
|
| 681 |
+
|
| 682 |
+
See `Akima1DInterpolator` for details.
|
| 683 |
+
|
| 684 |
+
Parameters
|
| 685 |
+
----------
|
| 686 |
+
xi : np.ndarray
|
| 687 |
+
A sorted list of x-coordinates, of length N.
|
| 688 |
+
yi : np.ndarray
|
| 689 |
+
A 1-D array of real values. `yi`'s length along the interpolation
|
| 690 |
+
axis must be equal to the length of `xi`. If N-D array, use axis
|
| 691 |
+
parameter to select correct axis.
|
| 692 |
+
x : np.ndarray
|
| 693 |
+
Of length M.
|
| 694 |
+
der : int, optional
|
| 695 |
+
How many derivatives to extract; None for all potentially
|
| 696 |
+
nonzero derivatives (that is a number equal to the number
|
| 697 |
+
of points), or a list of derivatives to extract. This number
|
| 698 |
+
includes the function value as 0th derivative.
|
| 699 |
+
axis : int, optional
|
| 700 |
+
Axis in the yi array corresponding to the x-coordinate values.
|
| 701 |
+
|
| 702 |
+
See Also
|
| 703 |
+
--------
|
| 704 |
+
scipy.interpolate.Akima1DInterpolator
|
| 705 |
+
|
| 706 |
+
Returns
|
| 707 |
+
-------
|
| 708 |
+
y : scalar or array-like
|
| 709 |
+
The result, of length R or length M or M by R,
|
| 710 |
+
|
| 711 |
+
"""
|
| 712 |
+
from scipy import interpolate
|
| 713 |
+
|
| 714 |
+
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
|
| 715 |
+
|
| 716 |
+
return P(x, nu=der)
|
| 717 |
+
|
| 718 |
+
|
| 719 |
+
def _cubicspline_interpolate(
|
| 720 |
+
xi: np.ndarray,
|
| 721 |
+
yi: np.ndarray,
|
| 722 |
+
x: np.ndarray,
|
| 723 |
+
axis: AxisInt = 0,
|
| 724 |
+
bc_type: str | tuple[Any, Any] = "not-a-knot",
|
| 725 |
+
extrapolate=None,
|
| 726 |
+
):
|
| 727 |
+
"""
|
| 728 |
+
Convenience function for cubic spline data interpolator.
|
| 729 |
+
|
| 730 |
+
See `scipy.interpolate.CubicSpline` for details.
|
| 731 |
+
|
| 732 |
+
Parameters
|
| 733 |
+
----------
|
| 734 |
+
xi : np.ndarray, shape (n,)
|
| 735 |
+
1-d array containing values of the independent variable.
|
| 736 |
+
Values must be real, finite and in strictly increasing order.
|
| 737 |
+
yi : np.ndarray
|
| 738 |
+
Array containing values of the dependent variable. It can have
|
| 739 |
+
arbitrary number of dimensions, but the length along ``axis``
|
| 740 |
+
(see below) must match the length of ``x``. Values must be finite.
|
| 741 |
+
x : np.ndarray, shape (m,)
|
| 742 |
+
axis : int, optional
|
| 743 |
+
Axis along which `y` is assumed to be varying. Meaning that for
|
| 744 |
+
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
|
| 745 |
+
Default is 0.
|
| 746 |
+
bc_type : string or 2-tuple, optional
|
| 747 |
+
Boundary condition type. Two additional equations, given by the
|
| 748 |
+
boundary conditions, are required to determine all coefficients of
|
| 749 |
+
polynomials on each segment [2]_.
|
| 750 |
+
If `bc_type` is a string, then the specified condition will be applied
|
| 751 |
+
at both ends of a spline. Available conditions are:
|
| 752 |
+
* 'not-a-knot' (default): The first and second segment at a curve end
|
| 753 |
+
are the same polynomial. It is a good default when there is no
|
| 754 |
+
information on boundary conditions.
|
| 755 |
+
* 'periodic': The interpolated functions is assumed to be periodic
|
| 756 |
+
of period ``x[-1] - x[0]``. The first and last value of `y` must be
|
| 757 |
+
identical: ``y[0] == y[-1]``. This boundary condition will result in
|
| 758 |
+
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
|
| 759 |
+
* 'clamped': The first derivative at curves ends are zero. Assuming
|
| 760 |
+
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
|
| 761 |
+
* 'natural': The second derivative at curve ends are zero. Assuming
|
| 762 |
+
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
|
| 763 |
+
If `bc_type` is a 2-tuple, the first and the second value will be
|
| 764 |
+
applied at the curve start and end respectively. The tuple values can
|
| 765 |
+
be one of the previously mentioned strings (except 'periodic') or a
|
| 766 |
+
tuple `(order, deriv_values)` allowing to specify arbitrary
|
| 767 |
+
derivatives at curve ends:
|
| 768 |
+
* `order`: the derivative order, 1 or 2.
|
| 769 |
+
* `deriv_value`: array-like containing derivative values, shape must
|
| 770 |
+
be the same as `y`, excluding ``axis`` dimension. For example, if
|
| 771 |
+
`y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with
|
| 772 |
+
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
|
| 773 |
+
and have the shape (n0, n1).
|
| 774 |
+
extrapolate : {bool, 'periodic', None}, optional
|
| 775 |
+
If bool, determines whether to extrapolate to out-of-bounds points
|
| 776 |
+
based on first and last intervals, or to return NaNs. If 'periodic',
|
| 777 |
+
periodic extrapolation is used. If None (default), ``extrapolate`` is
|
| 778 |
+
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
|
| 779 |
+
|
| 780 |
+
See Also
|
| 781 |
+
--------
|
| 782 |
+
scipy.interpolate.CubicHermiteSpline
|
| 783 |
+
|
| 784 |
+
Returns
|
| 785 |
+
-------
|
| 786 |
+
y : scalar or array-like
|
| 787 |
+
The result, of shape (m,)
|
| 788 |
+
|
| 789 |
+
References
|
| 790 |
+
----------
|
| 791 |
+
.. [1] `Cubic Spline Interpolation
|
| 792 |
+
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
|
| 793 |
+
on Wikiversity.
|
| 794 |
+
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
|
| 795 |
+
"""
|
| 796 |
+
from scipy import interpolate
|
| 797 |
+
|
| 798 |
+
P = interpolate.CubicSpline(
|
| 799 |
+
xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate
|
| 800 |
+
)
|
| 801 |
+
|
| 802 |
+
return P(x)
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
def _interpolate_with_limit_area(
    values: np.ndarray,
    method: Literal["pad", "backfill"],
    limit: int | None,
    limit_area: Literal["inside", "outside"],
) -> None:
    """
    Apply interpolation and limit_area logic to values along a to-be-specified axis.

    Parameters
    ----------
    values: np.ndarray
        Input array; modified in place.
    method: str
        Interpolation method. Could be "bfill" or "pad"
    limit: int, optional
        Index limit on interpolation.
    limit_area: {'inside', 'outside'}
        Limit area for interpolation.

    Raises
    ------
    ValueError
        If ``limit_area`` is neither 'inside' nor 'outside'.

    Notes
    -----
    Modifies values in-place.  Strategy: fill unconditionally, then restore
    NaN at originally-NA positions that fall outside the allowed area.
    """

    # Capture the NA mask *before* filling; it is needed afterwards to undo
    # fills outside the requested limit_area.
    invalid = isna(values)
    is_valid = ~invalid

    if not invalid.all():
        # first/last valid positions delimit the "inside" region.
        first = find_valid_index(how="first", is_valid=is_valid)
        if first is None:
            first = 0
        last = find_valid_index(how="last", is_valid=is_valid)
        if last is None:
            last = len(values)

        pad_or_backfill_inplace(
            values,
            method=method,
            limit=limit,
            limit_area=limit_area,
        )

        if limit_area == "inside":
            # Positions between the outermost valid values keep their fill.
            invalid[first : last + 1] = False
        elif limit_area == "outside":
            # Positions before the first / after the last valid value keep
            # their fill.
            invalid[:first] = invalid[last + 1 :] = False
        else:
            raise ValueError("limit_area should be 'inside' or 'outside'")

        # Re-NaN every originally-NA position outside the allowed area.
        values[invalid] = np.nan
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def pad_or_backfill_inplace(
    values: np.ndarray,
    method: Literal["pad", "backfill"] = "pad",
    axis: AxisInt = 0,
    limit: int | None = None,
    limit_area: Literal["inside", "outside"] | None = None,
) -> None:
    """
    Fill NA values in ``values`` in place by padding or backfilling,
    promoting the array to 2-d if needed.

    Parameters
    ----------
    values: np.ndarray
        Input array; modified in place.
    method: str, default "pad"
        Interpolation method. Could be "bfill" or "pad"
    axis: 0 or 1
        Interpolation axis
    limit: int, optional
        Index limit on interpolation.
    limit_area: str, optional
        Limit area for interpolation. Can be "inside" or "outside"

    Notes
    -----
    Modifies values in-place.
    """
    # transf orients the data so filling always runs along the last axis.
    transf = (lambda x: x) if axis == 0 else (lambda x: x.T)

    # reshape a 1 dim if needed.  Reshaping a 1-d array to (1, n) yields a
    # view, so the in-place fill below still reaches the caller's array.
    if values.ndim == 1:
        if axis != 0:  # pragma: no cover
            raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
        values = values.reshape(tuple((1,) + values.shape))

    method = clean_fill_method(method)
    tvalues = transf(values)

    func = get_fill_func(method, ndim=2)
    # _pad_2d and _backfill_2d both modify tvalues inplace
    func(tvalues, limit=limit, limit_area=limit_area)
|
| 900 |
+
|
| 901 |
+
|
| 902 |
+
def _fillna_prep(
|
| 903 |
+
values, mask: npt.NDArray[np.bool_] | None = None
|
| 904 |
+
) -> npt.NDArray[np.bool_]:
|
| 905 |
+
# boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
|
| 906 |
+
|
| 907 |
+
if mask is None:
|
| 908 |
+
mask = isna(values)
|
| 909 |
+
|
| 910 |
+
return mask
|
| 911 |
+
|
| 912 |
+
|
| 913 |
+
def _datetimelike_compat(func: F) -> F:
|
| 914 |
+
"""
|
| 915 |
+
Wrapper to handle datetime64 and timedelta64 dtypes.
|
| 916 |
+
"""
|
| 917 |
+
|
| 918 |
+
@wraps(func)
|
| 919 |
+
def new_func(
|
| 920 |
+
values,
|
| 921 |
+
limit: int | None = None,
|
| 922 |
+
limit_area: Literal["inside", "outside"] | None = None,
|
| 923 |
+
mask=None,
|
| 924 |
+
):
|
| 925 |
+
if needs_i8_conversion(values.dtype):
|
| 926 |
+
if mask is None:
|
| 927 |
+
# This needs to occur before casting to int64
|
| 928 |
+
mask = isna(values)
|
| 929 |
+
|
| 930 |
+
result, mask = func(
|
| 931 |
+
values.view("i8"), limit=limit, limit_area=limit_area, mask=mask
|
| 932 |
+
)
|
| 933 |
+
return result.view(values.dtype), mask
|
| 934 |
+
|
| 935 |
+
return func(values, limit=limit, limit_area=limit_area, mask=mask)
|
| 936 |
+
|
| 937 |
+
return cast(F, new_func)
|
| 938 |
+
|
| 939 |
+
|
| 940 |
+
@_datetimelike_compat
def _pad_1d(
    values: np.ndarray,
    limit: int | None = None,
    limit_area: Literal["inside", "outside"] | None = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
    """Forward-fill NA positions of a 1d array in place.

    Parameters
    ----------
    values : np.ndarray
        Array to fill; modified in place.
    limit : int, optional
        Fill limit passed to the cython routine.
    limit_area : {"inside", "outside"}, optional
        Restrict filling relative to the outermost valid values.
    mask : np.ndarray[bool], optional
        Precomputed NA mask; computed from ``values`` when None.

    Returns
    -------
    tuple
        The same ``values`` array and the (possibly adjusted) mask.
    """
    mask = _fillna_prep(values, mask)
    if limit_area is not None and not mask.all():
        # _fill_limit_area_1d requires at least one valid entry in mask.
        _fill_limit_area_1d(mask, limit_area)
    algos.pad_inplace(values, mask, limit=limit)
    return values, mask
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
@_datetimelike_compat
def _backfill_1d(
    values: np.ndarray,
    limit: int | None = None,
    limit_area: Literal["inside", "outside"] | None = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
    """Backward-fill NA positions of a 1d array in place.

    Parameters
    ----------
    values : np.ndarray
        Array to fill; modified in place.
    limit : int, optional
        Fill limit passed to the cython routine.
    limit_area : {"inside", "outside"}, optional
        Restrict filling relative to the outermost valid values.
    mask : np.ndarray[bool], optional
        Precomputed NA mask; computed from ``values`` when None.

    Returns
    -------
    tuple
        The same ``values`` array and the (possibly adjusted) mask.
    """
    mask = _fillna_prep(values, mask)
    if limit_area is not None and not mask.all():
        # _fill_limit_area_1d requires at least one valid entry in mask.
        _fill_limit_area_1d(mask, limit_area)
    algos.backfill_inplace(values, mask, limit=limit)
    return values, mask
|
| 966 |
+
|
| 967 |
+
|
| 968 |
+
@_datetimelike_compat
def _pad_2d(
    values: np.ndarray,
    limit: int | None = None,
    limit_area: Literal["inside", "outside"] | None = None,
    mask: npt.NDArray[np.bool_] | None = None,
):
    """Forward-fill NA positions of a 2d array in place.

    Parameters
    ----------
    values : np.ndarray
        2d array to fill; modified in place.
    limit : int, optional
        Fill limit passed to the cython routine.
    limit_area : {"inside", "outside"}, optional
        Restrict filling relative to the outermost valid values.
    mask : np.ndarray[bool], optional
        Precomputed NA mask; computed from ``values`` when None.

    Returns
    -------
    tuple
        The same ``values`` array and the (possibly adjusted) mask.
    """
    mask = _fillna_prep(values, mask)
    if limit_area is not None:
        _fill_limit_area_2d(mask, limit_area)

    if values.size:
        algos.pad_2d_inplace(values, mask, limit=limit)
    else:
        # for test coverage
        pass
    return values, mask
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
@_datetimelike_compat
def _backfill_2d(
    values,
    limit: int | None = None,
    limit_area: Literal["inside", "outside"] | None = None,
    mask: npt.NDArray[np.bool_] | None = None,
):
    """Backward-fill NA positions of a 2d array in place.

    Parameters
    ----------
    values : np.ndarray
        2d array to fill; modified in place.
    limit : int, optional
        Fill limit passed to the cython routine.
    limit_area : {"inside", "outside"}, optional
        Restrict filling relative to the outermost valid values.
    mask : np.ndarray[bool], optional
        Precomputed NA mask; computed from ``values`` when None.

    Returns
    -------
    tuple
        The same ``values`` array and the (possibly adjusted) mask.
    """
    mask = _fillna_prep(values, mask)
    if limit_area is not None:
        _fill_limit_area_2d(mask, limit_area)

    if values.size:
        algos.backfill_2d_inplace(values, mask, limit=limit)
    else:
        # for test coverage
        pass
    return values, mask
|
| 1004 |
+
|
| 1005 |
+
|
| 1006 |
+
def _fill_limit_area_1d(
|
| 1007 |
+
mask: npt.NDArray[np.bool_], limit_area: Literal["outside", "inside"]
|
| 1008 |
+
) -> None:
|
| 1009 |
+
"""Prepare 1d mask for ffill/bfill with limit_area.
|
| 1010 |
+
|
| 1011 |
+
Caller is responsible for checking at least one value of mask is False.
|
| 1012 |
+
When called, mask will no longer faithfully represent when
|
| 1013 |
+
the corresponding are NA or not.
|
| 1014 |
+
|
| 1015 |
+
Parameters
|
| 1016 |
+
----------
|
| 1017 |
+
mask : np.ndarray[bool, ndim=1]
|
| 1018 |
+
Mask representing NA values when filling.
|
| 1019 |
+
limit_area : { "outside", "inside" }
|
| 1020 |
+
Whether to limit filling to outside or inside the outer most non-NA value.
|
| 1021 |
+
"""
|
| 1022 |
+
neg_mask = ~mask
|
| 1023 |
+
first = neg_mask.argmax()
|
| 1024 |
+
last = len(neg_mask) - neg_mask[::-1].argmax() - 1
|
| 1025 |
+
if limit_area == "inside":
|
| 1026 |
+
mask[:first] = False
|
| 1027 |
+
mask[last + 1 :] = False
|
| 1028 |
+
elif limit_area == "outside":
|
| 1029 |
+
mask[first + 1 : last] = False
|
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
def _fill_limit_area_2d(
|
| 1033 |
+
mask: npt.NDArray[np.bool_], limit_area: Literal["outside", "inside"]
|
| 1034 |
+
) -> None:
|
| 1035 |
+
"""Prepare 2d mask for ffill/bfill with limit_area.
|
| 1036 |
+
|
| 1037 |
+
When called, mask will no longer faithfully represent when
|
| 1038 |
+
the corresponding are NA or not.
|
| 1039 |
+
|
| 1040 |
+
Parameters
|
| 1041 |
+
----------
|
| 1042 |
+
mask : np.ndarray[bool, ndim=1]
|
| 1043 |
+
Mask representing NA values when filling.
|
| 1044 |
+
limit_area : { "outside", "inside" }
|
| 1045 |
+
Whether to limit filling to outside or inside the outer most non-NA value.
|
| 1046 |
+
"""
|
| 1047 |
+
neg_mask = ~mask.T
|
| 1048 |
+
if limit_area == "outside":
|
| 1049 |
+
# Identify inside
|
| 1050 |
+
la_mask = (
|
| 1051 |
+
np.maximum.accumulate(neg_mask, axis=0)
|
| 1052 |
+
& np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]
|
| 1053 |
+
)
|
| 1054 |
+
else:
|
| 1055 |
+
# Identify outside
|
| 1056 |
+
la_mask = (
|
| 1057 |
+
~np.maximum.accumulate(neg_mask, axis=0)
|
| 1058 |
+
| ~np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]
|
| 1059 |
+
)
|
| 1060 |
+
mask[la_mask.T] = False
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
# Dispatch table: fill-method name -> 1d in-place filling implementation.
_fill_methods = {"pad": _pad_1d, "backfill": _backfill_1d}
|
| 1064 |
+
|
| 1065 |
+
|
| 1066 |
+
def get_fill_func(method, ndim: int = 1):
    """Return the in-place filling function for ``method`` and ``ndim``.

    ``method`` is normalized through clean_fill_method before lookup;
    1d lookups use the module-level _fill_methods table.
    """
    method = clean_fill_method(method)
    table = _fill_methods if ndim == 1 else {"pad": _pad_2d, "backfill": _backfill_2d}
    return table[method]
|
| 1071 |
+
|
| 1072 |
+
|
| 1073 |
+
def clean_reindex_fill_method(method) -> ReindexMethod | None:
|
| 1074 |
+
if method is None:
|
| 1075 |
+
return None
|
| 1076 |
+
return clean_fill_method(method, allow_nearest=True)
|
| 1077 |
+
|
| 1078 |
+
|
| 1079 |
+
def _interp_limit(
    invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None
):
    """
    Get indexers of values that won't be filled
    because they exceed the limits.

    Parameters
    ----------
    invalid : np.ndarray[bool]
    fw_limit : int or None
        forward limit to index
    bw_limit : int or None
        backward limit to index

    Returns
    -------
    set of indexers

    Notes
    -----
    This is equivalent to the more readable, but slower

    .. code-block:: python

        def _interp_limit(invalid, fw_limit, bw_limit):
            for x in np.where(invalid)[0]:
                if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
                    yield x
    """
    # handle forward first; the backward direction is the same except
    # 1. operate on the reversed array
    # 2. subtract the returned indices from N - 1
    N = len(invalid)
    f_idx = set()
    b_idx = set()

    def inner(invalid, limit: int):
        # Indices that exceed `limit` in the forward direction: either the
        # window of limit+1 entries ending there is all-NA (windowed), or
        # the index lies in an all-NA run at the very start of the array
        # (the cumsum term, where no valid value has yet appeared).
        limit = min(limit, N)
        windowed = _rolling_window(invalid, limit + 1).all(1)
        idx = set(np.where(windowed)[0] + limit) | set(
            np.where((~invalid[: limit + 1]).cumsum() == 0)[0]
        )
        return idx

    if fw_limit is not None:
        if fw_limit == 0:
            # limit 0 means no forward filling at all: every NA exceeds it.
            f_idx = set(np.where(invalid)[0])
        else:
            f_idx = inner(invalid, fw_limit)

    if bw_limit is not None:
        if bw_limit == 0:
            # then we don't even need to care about backwards
            # just use forwards
            return f_idx
        else:
            # Reuse the forward logic on the reversed array, then map the
            # reversed indices back via N - 1 - i.
            b_idx_inv = list(inner(invalid[::-1], bw_limit))
            b_idx = set(N - 1 - np.asarray(b_idx_inv))
            if fw_limit == 0:
                return b_idx

    # A value is preserved only when it exceeds both directional limits.
    return f_idx & b_idx
|
| 1142 |
+
|
| 1143 |
+
|
| 1144 |
+
def _rolling_window(a: npt.NDArray[np.bool_], window: int) -> npt.NDArray[np.bool_]:
|
| 1145 |
+
"""
|
| 1146 |
+
[True, True, False, True, False], 2 ->
|
| 1147 |
+
|
| 1148 |
+
[
|
| 1149 |
+
[True, True],
|
| 1150 |
+
[True, False],
|
| 1151 |
+
[False, True],
|
| 1152 |
+
[True, False],
|
| 1153 |
+
]
|
| 1154 |
+
"""
|
| 1155 |
+
# https://stackoverflow.com/a/6811241
|
| 1156 |
+
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
|
| 1157 |
+
strides = a.strides + (a.strides[-1],)
|
| 1158 |
+
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
|
videollama2/lib/python3.10/site-packages/pandas/core/nanops.py
ADDED
|
@@ -0,0 +1,1748 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import itertools
|
| 5 |
+
from typing import (
|
| 6 |
+
Any,
|
| 7 |
+
Callable,
|
| 8 |
+
cast,
|
| 9 |
+
)
|
| 10 |
+
import warnings
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from pandas._config import get_option
|
| 15 |
+
|
| 16 |
+
from pandas._libs import (
|
| 17 |
+
NaT,
|
| 18 |
+
NaTType,
|
| 19 |
+
iNaT,
|
| 20 |
+
lib,
|
| 21 |
+
)
|
| 22 |
+
from pandas._typing import (
|
| 23 |
+
ArrayLike,
|
| 24 |
+
AxisInt,
|
| 25 |
+
CorrelationMethod,
|
| 26 |
+
Dtype,
|
| 27 |
+
DtypeObj,
|
| 28 |
+
F,
|
| 29 |
+
Scalar,
|
| 30 |
+
Shape,
|
| 31 |
+
npt,
|
| 32 |
+
)
|
| 33 |
+
from pandas.compat._optional import import_optional_dependency
|
| 34 |
+
from pandas.util._exceptions import find_stack_level
|
| 35 |
+
|
| 36 |
+
from pandas.core.dtypes.common import (
|
| 37 |
+
is_complex,
|
| 38 |
+
is_float,
|
| 39 |
+
is_float_dtype,
|
| 40 |
+
is_integer,
|
| 41 |
+
is_numeric_dtype,
|
| 42 |
+
is_object_dtype,
|
| 43 |
+
needs_i8_conversion,
|
| 44 |
+
pandas_dtype,
|
| 45 |
+
)
|
| 46 |
+
from pandas.core.dtypes.missing import (
|
| 47 |
+
isna,
|
| 48 |
+
na_value_for_dtype,
|
| 49 |
+
notna,
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
# Soft dependency: emit a warning (rather than raising) when bottleneck is
# not installed, so pandas keeps working without it.
bn = import_optional_dependency("bottleneck", errors="warn")
_BOTTLENECK_INSTALLED = bn is not None
# Master switch consulted by bottleneck_switch; toggled via set_use_bottleneck().
_USE_BOTTLENECK = False
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def set_use_bottleneck(v: bool = True) -> None:
    """Enable or disable use of the bottleneck library for nan-reductions.

    The request is honored only when bottleneck is actually installed;
    otherwise the module-level switch is left untouched (False).
    """
    global _USE_BOTTLENECK
    # Ignore the request entirely when bottleneck is unavailable.
    if not _BOTTLENECK_INSTALLED:
        return
    _USE_BOTTLENECK = v
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Seed the switch from the user-configurable pandas option at import time.
set_use_bottleneck(get_option("compute.use_bottleneck"))
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class disallow:
    """Decorator that rejects reduction calls on forbidden dtypes.

    Instantiated with one or more dtype specifiers; the wrapped reduction
    raises TypeError when any positional or keyword argument carries one of
    those dtypes.
    """

    def __init__(self, *dtypes: Dtype) -> None:
        super().__init__()
        # Normalize the specifiers to numpy scalar types for issubclass checks.
        self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes)

    def check(self, obj) -> bool:
        """Return True if *obj* exposes a dtype matching a disallowed type."""
        if not hasattr(obj, "dtype"):
            return False
        return issubclass(obj.dtype.type, self.dtypes)

    def __call__(self, f: F) -> F:
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Reject as soon as any argument has a disallowed dtype.
            for candidate in itertools.chain(args, kwargs.values()):
                if self.check(candidate):
                    f_name = f.__name__.replace("nan", "")
                    raise TypeError(
                        f"reduction operation '{f_name}' not allowed for this dtype"
                    )
            try:
                return f(*args, **kwargs)
            except ValueError as e:
                # we want to transform an object array
                # ValueError message to the more typical TypeError
                # e.g. this is normally a disallowed function on
                # object arrays that contain strings
                if is_object_dtype(args[0]):
                    raise TypeError(e) from e
                raise

        return cast(F, wrapper)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class bottleneck_switch:
    """Decorator that dispatches a nan-reduction to bottleneck when possible.

    Parameters
    ----------
    name : str, optional
        bottleneck function name to look up; defaults to the wrapped
        function's __name__.
    **kwargs
        default keyword arguments injected into each call when the caller
        did not supply them (e.g. ``ddof=1`` for nanstd).
    """

    def __init__(self, name=None, **kwargs) -> None:
        self.name = name
        self.kwargs = kwargs

    def __call__(self, alt: F) -> F:
        # `alt` is the pure-numpy fallback implementation.
        bn_name = self.name or alt.__name__

        try:
            bn_func = getattr(bn, bn_name)
        except (AttributeError, NameError):  # pragma: no cover
            bn_func = None

        @functools.wraps(alt)
        def f(
            values: np.ndarray,
            *,
            axis: AxisInt | None = None,
            skipna: bool = True,
            **kwds,
        ):
            # Inject decorator-level defaults without clobbering caller kwargs.
            if len(self.kwargs) > 0:
                for k, v in self.kwargs.items():
                    if k not in kwds:
                        kwds[k] = v

            if values.size == 0 and kwds.get("min_count") is None:
                # We are empty, returning NA for our type
                # Only applies for the default `min_count` of None
                # since that affects how empty arrays are handled.
                # TODO(GH-18976) update all the nanops methods to
                # correctly handle empty inputs and remove this check.
                # It *may* just be `var`
                return _na_for_min_count(values, axis)

            if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
                if kwds.get("mask", None) is None:
                    # `mask` is not recognised by bottleneck, would raise
                    # TypeError if called
                    kwds.pop("mask", None)
                    result = bn_func(values, axis=axis, **kwds)

                    # prefer to treat inf/-inf as NA, but must compute the func
                    # twice :(
                    if _has_infs(result):
                        result = alt(values, axis=axis, skipna=skipna, **kwds)
                else:
                    # A caller-supplied mask forces the numpy fallback.
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
            else:
                result = alt(values, axis=axis, skipna=skipna, **kwds)

            return result

        return cast(F, f)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:
|
| 155 |
+
# Bottleneck chokes on datetime64, PeriodDtype (or and EA)
|
| 156 |
+
if dtype != object and not needs_i8_conversion(dtype):
|
| 157 |
+
# GH 42878
|
| 158 |
+
# Bottleneck uses naive summation leading to O(n) loss of precision
|
| 159 |
+
# unlike numpy which implements pairwise summation, which has O(log(n)) loss
|
| 160 |
+
# crossref: https://github.com/pydata/bottleneck/issues/379
|
| 161 |
+
|
| 162 |
+
# GH 15507
|
| 163 |
+
# bottleneck does not properly upcast during the sum
|
| 164 |
+
# so can overflow
|
| 165 |
+
|
| 166 |
+
# GH 9422
|
| 167 |
+
# further we also want to preserve NaN when all elements
|
| 168 |
+
# are NaN, unlike bottleneck/numpy which consider this
|
| 169 |
+
# to be 0
|
| 170 |
+
return name not in ["nansum", "nanprod", "nanmean"]
|
| 171 |
+
return False
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def _has_infs(result) -> bool:
|
| 175 |
+
if isinstance(result, np.ndarray):
|
| 176 |
+
if result.dtype in ("f8", "f4"):
|
| 177 |
+
# Note: outside of an nanops-specific test, we always have
|
| 178 |
+
# result.ndim == 1, so there is no risk of this ravel making a copy.
|
| 179 |
+
return lib.has_infs(result.ravel("K"))
|
| 180 |
+
try:
|
| 181 |
+
return np.isinf(result).any()
|
| 182 |
+
except (TypeError, NotImplementedError):
|
| 183 |
+
# if it doesn't support infs, then it can't have infs
|
| 184 |
+
return False
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def _get_fill_value(
|
| 188 |
+
dtype: DtypeObj, fill_value: Scalar | None = None, fill_value_typ=None
|
| 189 |
+
):
|
| 190 |
+
"""return the correct fill value for the dtype of the values"""
|
| 191 |
+
if fill_value is not None:
|
| 192 |
+
return fill_value
|
| 193 |
+
if _na_ok_dtype(dtype):
|
| 194 |
+
if fill_value_typ is None:
|
| 195 |
+
return np.nan
|
| 196 |
+
else:
|
| 197 |
+
if fill_value_typ == "+inf":
|
| 198 |
+
return np.inf
|
| 199 |
+
else:
|
| 200 |
+
return -np.inf
|
| 201 |
+
else:
|
| 202 |
+
if fill_value_typ == "+inf":
|
| 203 |
+
# need the max int here
|
| 204 |
+
return lib.i8max
|
| 205 |
+
else:
|
| 206 |
+
return iNaT
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def _maybe_get_mask(
|
| 210 |
+
values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None
|
| 211 |
+
) -> npt.NDArray[np.bool_] | None:
|
| 212 |
+
"""
|
| 213 |
+
Compute a mask if and only if necessary.
|
| 214 |
+
|
| 215 |
+
This function will compute a mask iff it is necessary. Otherwise,
|
| 216 |
+
return the provided mask (potentially None) when a mask does not need to be
|
| 217 |
+
computed.
|
| 218 |
+
|
| 219 |
+
A mask is never necessary if the values array is of boolean or integer
|
| 220 |
+
dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
|
| 221 |
+
dtype that is interpretable as either boolean or integer data (eg,
|
| 222 |
+
timedelta64), a mask must be provided.
|
| 223 |
+
|
| 224 |
+
If the skipna parameter is False, a new mask will not be computed.
|
| 225 |
+
|
| 226 |
+
The mask is computed using isna() by default. Setting invert=True selects
|
| 227 |
+
notna() as the masking function.
|
| 228 |
+
|
| 229 |
+
Parameters
|
| 230 |
+
----------
|
| 231 |
+
values : ndarray
|
| 232 |
+
input array to potentially compute mask for
|
| 233 |
+
skipna : bool
|
| 234 |
+
boolean for whether NaNs should be skipped
|
| 235 |
+
mask : Optional[ndarray]
|
| 236 |
+
nan-mask if known
|
| 237 |
+
|
| 238 |
+
Returns
|
| 239 |
+
-------
|
| 240 |
+
Optional[np.ndarray[bool]]
|
| 241 |
+
"""
|
| 242 |
+
if mask is None:
|
| 243 |
+
if values.dtype.kind in "biu":
|
| 244 |
+
# Boolean data cannot contain nulls, so signal via mask being None
|
| 245 |
+
return None
|
| 246 |
+
|
| 247 |
+
if skipna or values.dtype.kind in "mM":
|
| 248 |
+
mask = isna(values)
|
| 249 |
+
|
| 250 |
+
return mask
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def _get_values(
    values: np.ndarray,
    skipna: bool,
    fill_value: Any = None,
    fill_value_typ: str | None = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None]:
    """
    Utility to get the values view, mask, dtype, dtype_max, and fill_value.

    If both mask and fill_value/fill_value_typ are not None and skipna is True,
    the values array will be copied.

    For input arrays of boolean or integer dtypes, copies will only occur if a
    precomputed mask, a fill_value/fill_value_typ, and skipna=True are
    provided.

    Parameters
    ----------
    values : ndarray
        input array to potentially compute mask for
    skipna : bool
        boolean for whether NaNs should be skipped
    fill_value : Any
        value to fill NaNs with
    fill_value_typ : str
        Set to '+inf' or '-inf' to handle dtype-specific infinities
    mask : Optional[np.ndarray[bool]]
        nan-mask if known

    Returns
    -------
    values : ndarray
        Potential copy of input value array
    mask : Optional[ndarray[bool]]
        Mask for values, if deemed necessary to compute
    """
    # In _get_values is only called from within nanops, and in all cases
    # with scalar fill_value. This guarantee is important for the
    # np.where call below

    mask = _maybe_get_mask(values, skipna, mask)

    # Keep the pre-conversion dtype: fill-value selection depends on it.
    dtype = values.dtype

    datetimelike = False
    if values.dtype.kind in "mM":
        # changing timedelta64/datetime64 to int64 needs to happen after
        # finding `mask` above
        values = np.asarray(values.view("i8"))
        datetimelike = True

    if skipna and (mask is not None):
        # get our fill value (in case we need to provide an alternative
        # dtype for it)
        fill_value = _get_fill_value(
            dtype, fill_value=fill_value, fill_value_typ=fill_value_typ
        )

        if fill_value is not None:
            if mask.any():
                if datetimelike or _na_ok_dtype(dtype):
                    # Fill in place on a defensive copy; dtype can hold the
                    # fill value without promotion.
                    values = values.copy()
                    np.putmask(values, mask, fill_value)
                else:
                    # np.where will promote if needed
                    values = np.where(~mask, values, fill_value)

    return values, mask
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def _get_dtype_max(dtype: np.dtype) -> np.dtype:
|
| 325 |
+
# return a platform independent precision dtype
|
| 326 |
+
dtype_max = dtype
|
| 327 |
+
if dtype.kind in "bi":
|
| 328 |
+
dtype_max = np.dtype(np.int64)
|
| 329 |
+
elif dtype.kind == "u":
|
| 330 |
+
dtype_max = np.dtype(np.uint64)
|
| 331 |
+
elif dtype.kind == "f":
|
| 332 |
+
dtype_max = np.dtype(np.float64)
|
| 333 |
+
return dtype_max
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def _na_ok_dtype(dtype: DtypeObj) -> bool:
|
| 337 |
+
if needs_i8_conversion(dtype):
|
| 338 |
+
return False
|
| 339 |
+
return not issubclass(dtype.type, np.integer)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def _wrap_results(result, dtype: np.dtype, fill_value=None):
    """wrap our results if needed

    Re-box an integer-based reduction result into the original
    datetime64/timedelta64 dtype; non-datetimelike results pass through
    unchanged.
    """
    if result is NaT:
        pass

    elif dtype.kind == "M":
        if fill_value is None:
            # GH#24293
            fill_value = iNaT
        if not isinstance(result, np.ndarray):
            assert not isna(fill_value), "Expected non-null fill_value"
            # A scalar equal to the fill value means "missing" -> NaT.
            if result == fill_value:
                result = np.nan

            if isna(result):
                result = np.datetime64("NaT", "ns").astype(dtype)
            else:
                result = np.int64(result).view(dtype)
            # retain original unit
            result = result.astype(dtype, copy=False)
        else:
            # If we have float dtype, taking a view will give the wrong result
            result = result.astype(dtype)
    elif dtype.kind == "m":
        if not isinstance(result, np.ndarray):
            # Scalar path: fill value or NaN both mean "missing" -> NaT.
            if result == fill_value or np.isnan(result):
                result = np.timedelta64("NaT").astype(dtype)

            elif np.fabs(result) > lib.i8max:
                # raise if we have a timedelta64[ns] which is too large
                raise ValueError("overflow in timedelta operation")
            else:
                # return a timedelta64 with the original unit
                result = np.int64(result).astype(dtype, copy=False)

        else:
            result = result.astype("m8[ns]").view(dtype)

    return result
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def _datetimelike_compat(func: F) -> F:
|
| 384 |
+
"""
|
| 385 |
+
If we have datetime64 or timedelta64 values, ensure we have a correct
|
| 386 |
+
mask before calling the wrapped function, then cast back afterwards.
|
| 387 |
+
"""
|
| 388 |
+
|
| 389 |
+
@functools.wraps(func)
|
| 390 |
+
def new_func(
|
| 391 |
+
values: np.ndarray,
|
| 392 |
+
*,
|
| 393 |
+
axis: AxisInt | None = None,
|
| 394 |
+
skipna: bool = True,
|
| 395 |
+
mask: npt.NDArray[np.bool_] | None = None,
|
| 396 |
+
**kwargs,
|
| 397 |
+
):
|
| 398 |
+
orig_values = values
|
| 399 |
+
|
| 400 |
+
datetimelike = values.dtype.kind in "mM"
|
| 401 |
+
if datetimelike and mask is None:
|
| 402 |
+
mask = isna(values)
|
| 403 |
+
|
| 404 |
+
result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
|
| 405 |
+
|
| 406 |
+
if datetimelike:
|
| 407 |
+
result = _wrap_results(result, orig_values.dtype, fill_value=iNaT)
|
| 408 |
+
if not skipna:
|
| 409 |
+
assert mask is not None # checked above
|
| 410 |
+
result = _mask_datetimelike_result(result, axis, mask, orig_values)
|
| 411 |
+
|
| 412 |
+
return result
|
| 413 |
+
|
| 414 |
+
return cast(F, new_func)
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray:
|
| 418 |
+
"""
|
| 419 |
+
Return the missing value for `values`.
|
| 420 |
+
|
| 421 |
+
Parameters
|
| 422 |
+
----------
|
| 423 |
+
values : ndarray
|
| 424 |
+
axis : int or None
|
| 425 |
+
axis for the reduction, required if values.ndim > 1.
|
| 426 |
+
|
| 427 |
+
Returns
|
| 428 |
+
-------
|
| 429 |
+
result : scalar or ndarray
|
| 430 |
+
For 1-D values, returns a scalar of the correct missing type.
|
| 431 |
+
For 2-D values, returns a 1-D array where each element is missing.
|
| 432 |
+
"""
|
| 433 |
+
# we either return np.nan or pd.NaT
|
| 434 |
+
if values.dtype.kind in "iufcb":
|
| 435 |
+
values = values.astype("float64")
|
| 436 |
+
fill_value = na_value_for_dtype(values.dtype)
|
| 437 |
+
|
| 438 |
+
if values.ndim == 1:
|
| 439 |
+
return fill_value
|
| 440 |
+
elif axis is None:
|
| 441 |
+
return fill_value
|
| 442 |
+
else:
|
| 443 |
+
result_shape = values.shape[:axis] + values.shape[axis + 1 :]
|
| 444 |
+
|
| 445 |
+
return np.full(result_shape, fill_value, dtype=values.dtype)
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
def maybe_operate_rowwise(func: F) -> F:
    """
    NumPy operations on C-contiguous ndarrays with axis=1 can be very slow
    when axis 1 is much longer than axis 0. In that regime, operate one row
    at a time and stack the per-row results.
    """

    @functools.wraps(func)
    def newfunc(values: np.ndarray, *, axis: AxisInt | None = None, **kwargs):
        use_rowwise = (
            axis == 1
            and values.ndim == 2
            and values.flags["C_CONTIGUOUS"]
            # only takes this path for wide arrays (long dataframes), for threshold see
            # https://github.com/pandas-dev/pandas/pull/43311#issuecomment-974891737
            and (values.shape[1] / 1000) > values.shape[0]
            and values.dtype != object
            and values.dtype != bool
        )
        if use_rowwise:
            rows = list(values)
            if kwargs.get("mask") is not None:
                # Split the mask row-by-row alongside the values.
                mask = kwargs.pop("mask")
                results = [
                    func(row, mask=mask[i], **kwargs) for i, row in enumerate(rows)
                ]
            else:
                results = [func(row, **kwargs) for row in rows]
            return np.array(results)

        return func(values, axis=axis, **kwargs)

    return cast(F, newfunc)
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
def nanany(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> bool:
    """
    Check if any elements along an axis evaluate to True.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : bool

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 2])
    >>> nanops.nanany(s.values)
    True

    >>> from pandas.core import nanops
    >>> s = pd.Series([np.nan])
    >>> nanops.nanany(s.values)
    False
    """
    if mask is None and values.dtype.kind in "iub":
        # GH#26032 fastpath
        # error: Incompatible return value type (got "Union[bool_, ndarray]",
        # expected "bool")
        return values.any(axis)  # type: ignore[return-value]

    if values.dtype.kind == "M":
        # GH#34479
        warnings.warn(
            "'any' with datetime64 dtypes is deprecated and will raise in a "
            "future version. Use (obj != pd.Timestamp(0)).any() instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    # Fill missing entries with False so they cannot flip the result.
    values, _ = _get_values(values, skipna, fill_value=False, mask=mask)

    # For object type, any won't necessarily return
    # boolean values (numpy/numpy#4352)
    if values.dtype == object:
        values = values.astype(bool)

    # error: Incompatible return value type (got "Union[bool_, ndarray]", expected
    # "bool")
    return values.any(axis)  # type: ignore[return-value]
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
def nanall(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> bool:
    """
    Check if all elements along an axis evaluate to True.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : bool

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 2, np.nan])
    >>> nanops.nanall(s.values)
    True

    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 0])
    >>> nanops.nanall(s.values)
    False
    """
    if mask is None and values.dtype.kind in "iub":
        # GH#26032 fastpath
        # error: Incompatible return value type (got "Union[bool_, ndarray]",
        # expected "bool")
        return values.all(axis)  # type: ignore[return-value]

    if values.dtype.kind == "M":
        # GH#34479
        warnings.warn(
            "'all' with datetime64 dtypes is deprecated and will raise in a "
            "future version. Use (obj != pd.Timestamp(0)).all() instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    # Fill missing entries with True so they cannot flip the result.
    values, _ = _get_values(values, skipna, fill_value=True, mask=mask)

    # For object type, all won't necessarily return
    # boolean values (numpy/numpy#4352)
    if values.dtype == object:
        values = values.astype(bool)

    # error: Incompatible return value type (got "Union[bool_, ndarray]", expected
    # "bool")
    return values.all(axis)  # type: ignore[return-value]
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
@disallow("M8")
@_datetimelike_compat
@maybe_operate_rowwise
def nansum(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    min_count: int = 0,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Sum the elements along an axis ignoring NaNs

    Parameters
    ----------
    values : ndarray[dtype]
    axis : int, optional
    skipna : bool, default True
    min_count: int, default 0
        Require at least this many non-NA values; otherwise the result is NA.
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : dtype

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 2, np.nan])
    >>> nanops.nansum(s.values)
    3.0
    """
    dtype = values.dtype
    # Fill missing entries with 0 so they are no-ops under summation.
    values, mask = _get_values(values, skipna, fill_value=0, mask=mask)
    dtype_sum = _get_dtype_max(dtype)
    if dtype.kind == "f":
        # Keep the input's float precision rather than upcasting.
        dtype_sum = dtype
    elif dtype.kind == "m":
        # timedelta sums are accumulated as float64
        dtype_sum = np.dtype(np.float64)

    the_sum = values.sum(axis, dtype=dtype_sum)
    # Enforce min_count: positions with too few non-NA values become NA.
    the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)

    return the_sum
|
| 650 |
+
|
| 651 |
+
|
| 652 |
+
def _mask_datetimelike_result(
|
| 653 |
+
result: np.ndarray | np.datetime64 | np.timedelta64,
|
| 654 |
+
axis: AxisInt | None,
|
| 655 |
+
mask: npt.NDArray[np.bool_],
|
| 656 |
+
orig_values: np.ndarray,
|
| 657 |
+
) -> np.ndarray | np.datetime64 | np.timedelta64 | NaTType:
|
| 658 |
+
if isinstance(result, np.ndarray):
|
| 659 |
+
# we need to apply the mask
|
| 660 |
+
result = result.astype("i8").view(orig_values.dtype)
|
| 661 |
+
axis_mask = mask.any(axis=axis)
|
| 662 |
+
# error: Unsupported target for indexed assignment ("Union[ndarray[Any, Any],
|
| 663 |
+
# datetime64, timedelta64]")
|
| 664 |
+
result[axis_mask] = iNaT # type: ignore[index]
|
| 665 |
+
else:
|
| 666 |
+
if mask.any():
|
| 667 |
+
return np.int64(iNaT).view(orig_values.dtype)
|
| 668 |
+
return result
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
@bottleneck_switch()
@_datetimelike_compat
def nanmean(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Compute the mean of the element along an axis ignoring NaNs

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 2, np.nan])
    >>> nanops.nanmean(s.values)
    1.5
    """
    dtype = values.dtype
    # Fill missing entries with 0 so they are no-ops in the sum; the count
    # below excludes them via `mask`.
    values, mask = _get_values(values, skipna, fill_value=0, mask=mask)
    dtype_sum = _get_dtype_max(dtype)
    dtype_count = np.dtype(np.float64)

    # not using needs_i8_conversion because that includes period
    if dtype.kind in "mM":
        dtype_sum = np.dtype(np.float64)
    elif dtype.kind in "iu":
        dtype_sum = np.dtype(np.float64)
    elif dtype.kind == "f":
        # Float inputs keep their own precision for both sum and count.
        dtype_sum = dtype
        dtype_count = dtype

    count = _get_counts(values.shape, mask, axis, dtype=dtype_count)
    the_sum = values.sum(axis, dtype=dtype_sum)
    the_sum = _ensure_numeric(the_sum)

    if axis is not None and getattr(the_sum, "ndim", False):
        # Array result: divide elementwise, then NaN-out zero-count slots.
        count = cast(np.ndarray, count)
        with np.errstate(all="ignore"):
            # suppress division by zero warnings
            the_mean = the_sum / count
        ct_mask = count == 0
        if ct_mask.any():
            the_mean[ct_mask] = np.nan
    else:
        # Scalar result: a zero count means an all-NA input -> NaN.
        the_mean = the_sum / count if count > 0 else np.nan

    return the_mean
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
@bottleneck_switch()
def nanmedian(values, *, axis: AxisInt | None = None, skipna: bool = True, mask=None):
    """
    Compute the median along an axis, ignoring NaNs when skipna is True.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 2, 2])
    >>> nanops.nanmedian(s.values)
    2.0
    """
    # for floats without mask, the data already uses NaN as missing value
    # indicator, and `mask` will be calculated from that below -> in those
    # cases we never need to set NaN to the masked values
    using_nan_sentinel = values.dtype.kind == "f" and mask is None

    def get_median(x, _mask=None):
        # Median of a 1-D slice; `_mask` marks missing positions when given,
        # otherwise missing values are detected via notna().
        if _mask is None:
            _mask = notna(x)
        else:
            _mask = ~_mask
        if not skipna and not _mask.all():
            # skipna=False: any missing value poisons the result.
            return np.nan
        with warnings.catch_warnings():
            # Suppress RuntimeWarning about All-NaN slice
            warnings.filterwarnings(
                "ignore", "All-NaN slice encountered", RuntimeWarning
            )
            res = np.nanmedian(x[_mask])
        return res

    dtype = values.dtype
    values, mask = _get_values(values, skipna, mask=mask, fill_value=None)
    if values.dtype.kind != "f":
        if values.dtype == object:
            # GH#34671 avoid casting strings to numeric
            inferred = lib.infer_dtype(values)
            if inferred in ["string", "mixed"]:
                raise TypeError(f"Cannot convert {values} to numeric")
        try:
            values = values.astype("f8")
        except ValueError as err:
            # e.g. "could not convert string to float: 'a'"
            raise TypeError(str(err)) from err
    if not using_nan_sentinel and mask is not None:
        # Inject NaN sentinels so np.nanmedian can see the missing values.
        if not values.flags.writeable:
            values = values.copy()
        values[mask] = np.nan

    notempty = values.size

    # an array from a frame
    if values.ndim > 1 and axis is not None:
        # there's a non-empty array to apply over otherwise numpy raises
        if notempty:
            if not skipna:
                res = np.apply_along_axis(get_median, axis, values)

            else:
                # fastpath for the skipna case
                with warnings.catch_warnings():
                    # Suppress RuntimeWarning about All-NaN slice
                    warnings.filterwarnings(
                        "ignore", "All-NaN slice encountered", RuntimeWarning
                    )
                    if (values.shape[1] == 1 and axis == 0) or (
                        values.shape[0] == 1 and axis == 1
                    ):
                        # GH52788: fastpath when squeezable, nanmedian for 2D array slow
                        res = np.nanmedian(np.squeeze(values), keepdims=True)
                    else:
                        res = np.nanmedian(values, axis=axis)

        else:
            # must return the correct shape, but median is not defined for the
            # empty set so return nans of shape "everything but the passed axis"
            # since "axis" is where the reduction would occur if we had a nonempty
            # array
            res = _get_empty_reduction_result(values.shape, axis)

    else:
        # otherwise return a scalar value
        res = get_median(values, mask) if notempty else np.nan
    return _wrap_results(res, dtype)
|
| 833 |
+
|
| 834 |
+
|
| 835 |
+
def _get_empty_reduction_result(
|
| 836 |
+
shape: Shape,
|
| 837 |
+
axis: AxisInt,
|
| 838 |
+
) -> np.ndarray:
|
| 839 |
+
"""
|
| 840 |
+
The result from a reduction on an empty ndarray.
|
| 841 |
+
|
| 842 |
+
Parameters
|
| 843 |
+
----------
|
| 844 |
+
shape : Tuple[int, ...]
|
| 845 |
+
axis : int
|
| 846 |
+
|
| 847 |
+
Returns
|
| 848 |
+
-------
|
| 849 |
+
np.ndarray
|
| 850 |
+
"""
|
| 851 |
+
shp = np.array(shape)
|
| 852 |
+
dims = np.arange(len(shape))
|
| 853 |
+
ret = np.empty(shp[dims != axis], dtype=np.float64)
|
| 854 |
+
ret.fill(np.nan)
|
| 855 |
+
return ret
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def _get_counts_nanvar(
    values_shape: Shape,
    mask: npt.NDArray[np.bool_] | None,
    axis: AxisInt | None,
    ddof: int,
    dtype: np.dtype = np.dtype(np.float64),
) -> tuple[float | np.ndarray, float | np.ndarray]:
    """
    Count non-null values along an axis and derive the variance divisor
    (count minus degrees of freedom).

    Parameters
    ----------
    values_shape : Tuple[int, ...]
        shape tuple from values ndarray, used if mask is None
    mask : Optional[ndarray[bool]]
        locations in values that should be considered missing
    axis : Optional[int]
        axis to count along
    ddof : int
        degrees of freedom
    dtype : type, optional
        type to use for count

    Returns
    -------
    count : int, np.nan or np.ndarray
    d : int, np.nan or np.ndarray
    """
    count = _get_counts(values_shape, mask, axis, dtype=dtype)
    d = count - dtype.type(ddof)

    # When there are not enough observations, return NaN — never let the
    # downstream division produce inf.
    if is_float(count):
        if count <= ddof:
            # error: Incompatible types in assignment (expression has type
            # "float", variable has type "Union[floating[Any], ndarray[Any,
            # dtype[floating[Any]]]]")
            count = np.nan  # type: ignore[assignment]
            d = np.nan
    else:
        # count is not narrowed by is_float check
        count = cast(np.ndarray, count)
        too_few = count <= ddof
        if too_few.any():
            np.putmask(d, too_few, np.nan)
            np.putmask(count, too_few, np.nan)
    return count, d
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
@bottleneck_switch(ddof=1)
def nanstd(
    values,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    ddof: int = 1,
    mask=None,
):
    """
    Compute the standard deviation along given axis while ignoring NaNs

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
        where N represents the number of elements.
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 2, 3])
    >>> nanops.nanstd(s.values)
    1.0
    """
    # datetime64 has no meaningful std; reinterpret as timedelta64.
    if values.dtype == "M8[ns]":
        values = values.view("m8[ns]")

    input_dtype = values.dtype
    values, mask = _get_values(values, skipna, mask=mask)

    variance = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
    return _wrap_results(np.sqrt(variance), input_dtype)
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
@disallow("M8", "m8")
@bottleneck_switch(ddof=1)
def nanvar(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    ddof: int = 1,
    mask=None,
):
    """
    Compute the variance along given axis while ignoring NaNs

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
        where N represents the number of elements.
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 2, 3])
    >>> nanops.nanvar(s.values)
    1.0
    """
    dtype = values.dtype
    mask = _maybe_get_mask(values, skipna, mask)
    if dtype.kind in "iu":
        # integer input: promote to float so masked positions can hold NaN
        values = values.astype("f8")
        if mask is not None:
            values[mask] = np.nan

    if values.dtype.kind == "f":
        count, denom = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
    else:
        count, denom = _get_counts_nanvar(values.shape, mask, axis, ddof)

    if skipna and mask is not None:
        values = values.copy()
        np.putmask(values, mask, 0)

    # xref GH10242
    # Compute variance via two-pass algorithm, which is stable against
    # cancellation errors and relatively accurate for small numbers of
    # observations.
    #
    # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    mean_ = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
    if axis is not None:
        mean_ = np.expand_dims(mean_, axis)
    sq_dev = _ensure_numeric((mean_ - values) ** 2)
    if mask is not None:
        np.putmask(sq_dev, mask, 0)
    result = sq_dev.sum(axis=axis, dtype=np.float64) / denom

    # Return variance as np.float64 (the datatype used in the accumulator),
    # unless we were dealing with a float array, in which case use the same
    # precision as the original values array.
    if dtype.kind == "f":
        result = result.astype(dtype, copy=False)
    return result
|
| 1027 |
+
|
| 1028 |
+
|
| 1029 |
+
@disallow("M8", "m8")
def nansem(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    ddof: int = 1,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Compute the standard error in the mean along given axis while ignoring NaNs

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
        where N represents the number of elements.
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float64
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 2, 3])
    >>> nanops.nansem(s.values)
    0.5773502691896258
    """
    # This checks if non-numeric-like data is passed with numeric_only=False
    # and raises a TypeError otherwise
    nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)

    mask = _maybe_get_mask(values, skipna, mask)
    if values.dtype.kind != "f":
        values = values.astype("f8")

    if not skipna and mask is not None and mask.any():
        return np.nan

    count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
    variance = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)

    # sem = sqrt(var / n), written as a ratio of square roots.
    return np.sqrt(variance) / np.sqrt(count)
|
| 1080 |
+
|
| 1081 |
+
|
| 1082 |
+
def _nanminmax(meth, fill_value_typ):
    """Factory producing a NaN-aware min/max reduction named ``nan{meth}``."""

    @bottleneck_switch(name=f"nan{meth}")
    @_datetimelike_compat
    def reduction(
        values: np.ndarray,
        *,
        axis: AxisInt | None = None,
        skipna: bool = True,
        mask: npt.NDArray[np.bool_] | None = None,
    ):
        # Empty input: defer to the min_count machinery for the NA result.
        if values.size == 0:
            return _na_for_min_count(values, axis)

        values, mask = _get_values(
            values, skipna, fill_value_typ=fill_value_typ, mask=mask
        )
        # Dispatch to ndarray.min / ndarray.max by name.
        raw = getattr(values, meth)(axis)
        return _maybe_null_out(raw, axis, mask, values.shape)

    return reduction
|
| 1103 |
+
|
| 1104 |
+
|
| 1105 |
+
# NaN-aware minimum: missing values are filled with +inf so they never win.
nanmin = _nanminmax("min", fill_value_typ="+inf")
|
| 1106 |
+
# NaN-aware maximum: missing values are filled with -inf so they never win.
nanmax = _nanminmax("max", fill_value_typ="-inf")
|
| 1107 |
+
|
| 1108 |
+
|
| 1109 |
+
def nanargmax(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> int | np.ndarray:
    """
    Return the index/indices of the maximum, treating NaNs as missing.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : int or ndarray[int]
        The index/indices of max value in specified axis or -1 in the NA case

    Examples
    --------
    >>> from pandas.core import nanops
    >>> arr = np.array([1, 2, 3, np.nan, 4])
    >>> nanops.nanargmax(arr)
    4

    >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
    >>> arr[2:, 2] = np.nan
    >>> arr
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.],
           [ 6.,  7., nan],
           [ 9., 10., nan]])
    >>> nanops.nanargmax(arr, axis=1)
    array([2, 2, 1, 1])
    """
    # Fill missing positions with -inf so they can never be the argmax.
    values, mask = _get_values(values, True, fill_value_typ="-inf", mask=mask)
    raw = values.argmax(axis)
    # error: Argument 1 to "_maybe_arg_null_out" has incompatible type "Any |
    # signedinteger[Any]"; expected "ndarray[Any, Any]"
    return _maybe_arg_null_out(raw, axis, mask, skipna)  # type: ignore[arg-type]
|
| 1153 |
+
|
| 1154 |
+
|
| 1155 |
+
def nanargmin(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> int | np.ndarray:
    """
    Return the index/indices of the minimum, treating NaNs as missing.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : int or ndarray[int]
        The index/indices of min value in specified axis or -1 in the NA case

    Examples
    --------
    >>> from pandas.core import nanops
    >>> arr = np.array([1, 2, 3, np.nan, 4])
    >>> nanops.nanargmin(arr)
    0

    >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
    >>> arr[2:, 0] = np.nan
    >>> arr
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.],
           [nan,  7.,  8.],
           [nan, 10., 11.]])
    >>> nanops.nanargmin(arr, axis=1)
    array([0, 0, 1, 1])
    """
    # Fill missing positions with +inf so they can never be the argmin.
    values, mask = _get_values(values, True, fill_value_typ="+inf", mask=mask)
    raw = values.argmin(axis)
    # error: Argument 1 to "_maybe_arg_null_out" has incompatible type "Any |
    # signedinteger[Any]"; expected "ndarray[Any, Any]"
    return _maybe_arg_null_out(raw, axis, mask, skipna)  # type: ignore[arg-type]
|
| 1199 |
+
|
| 1200 |
+
|
| 1201 |
+
@disallow("M8", "m8")
@maybe_operate_rowwise
def nanskew(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Compute the sample skewness.

    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G1. The algorithm computes this coefficient directly
    from the second and third central moment.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float64
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 1, 2])
    >>> nanops.nanskew(s.values)
    1.7320508075688787
    """
    mask = _maybe_get_mask(values, skipna, mask)
    if values.dtype.kind != "f":
        values = values.astype("f8")
        count = _get_counts(values.shape, mask, axis)
    else:
        count = _get_counts(values.shape, mask, axis, dtype=values.dtype)

    if skipna and mask is not None:
        values = values.copy()
        np.putmask(values, mask, 0)
    elif not skipna and mask is not None and mask.any():
        return np.nan

    with np.errstate(invalid="ignore", divide="ignore"):
        mean = values.sum(axis, dtype=np.float64) / count
        if axis is not None:
            mean = np.expand_dims(mean, axis)

        deviations = values - mean
        if skipna and mask is not None:
            np.putmask(deviations, mask, 0)
        dev2 = deviations**2
        dev3 = dev2 * deviations
        m2 = dev2.sum(axis, dtype=np.float64)
        m3 = dev3.sum(axis, dtype=np.float64)

    # floating point error
    #
    # #18044 in _libs/windows.pyx calc_skew follow this behavior
    # to fix the fperr to treat m2 <1e-14 as zero
    m2 = _zero_out_fperr(m2)
    m3 = _zero_out_fperr(m3)

    with np.errstate(invalid="ignore", divide="ignore"):
        result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2**1.5)

    dtype = values.dtype
    if dtype.kind == "f":
        result = result.astype(dtype, copy=False)

    if isinstance(result, np.ndarray):
        # Zero variance => zero skew; fewer than 3 observations => NaN.
        result = np.where(m2 == 0, 0, result)
        result[count < 3] = np.nan
    else:
        result = dtype.type(0) if m2 == 0 else result
        if count < 3:
            return np.nan

    return result
|
| 1287 |
+
|
| 1288 |
+
|
| 1289 |
+
@disallow("M8", "m8")
@maybe_operate_rowwise
def nankurt(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Compute the sample excess kurtosis

    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G2, computed directly from the second and fourth
    central moment.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float64
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 1, 3, 2])
    >>> nanops.nankurt(s.values)
    -1.2892561983471076
    """
    mask = _maybe_get_mask(values, skipna, mask)
    if values.dtype.kind != "f":
        values = values.astype("f8")
        count = _get_counts(values.shape, mask, axis)
    else:
        count = _get_counts(values.shape, mask, axis, dtype=values.dtype)

    if skipna and mask is not None:
        values = values.copy()
        np.putmask(values, mask, 0)
    elif not skipna and mask is not None and mask.any():
        return np.nan

    with np.errstate(invalid="ignore", divide="ignore"):
        mean = values.sum(axis, dtype=np.float64) / count
        if axis is not None:
            mean = np.expand_dims(mean, axis)

        deviations = values - mean
        if skipna and mask is not None:
            np.putmask(deviations, mask, 0)
        dev2 = deviations**2
        dev4 = dev2**2
        m2 = dev2.sum(axis, dtype=np.float64)
        m4 = dev4.sum(axis, dtype=np.float64)

    with np.errstate(invalid="ignore", divide="ignore"):
        adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
        numerator = count * (count + 1) * (count - 1) * m4
        denominator = (count - 2) * (count - 3) * m2**2

    # floating point error
    #
    # #18044 in _libs/windows.pyx calc_kurt follow this behavior
    # to fix the fperr to treat denom <1e-14 as zero
    numerator = _zero_out_fperr(numerator)
    denominator = _zero_out_fperr(denominator)

    if not isinstance(denominator, np.ndarray):
        # if ``denom`` is a scalar, check these corner cases first before
        # doing division
        if count < 4:
            return np.nan
        if denominator == 0:
            return values.dtype.type(0)

    with np.errstate(invalid="ignore", divide="ignore"):
        result = numerator / denominator - adj

    dtype = values.dtype
    if dtype.kind == "f":
        result = result.astype(dtype, copy=False)

    if isinstance(result, np.ndarray):
        # Zero variance => zero kurtosis; fewer than 4 observations => NaN.
        result = np.where(denominator == 0, 0, result)
        result[count < 4] = np.nan

    return result
|
| 1384 |
+
|
| 1385 |
+
|
| 1386 |
+
@disallow("M8", "m8")
@maybe_operate_rowwise
def nanprod(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    min_count: int = 0,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Parameters
    ----------
    values : ndarray[dtype]
    axis : int, optional
    skipna : bool, default True
    min_count: int, default 0
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    Dtype
        The product of all elements on a given axis. ( NaNs are treated as 1)

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 2, 3, np.nan])
    >>> nanops.nanprod(s.values)
    6.0
    """
    mask = _maybe_get_mask(values, skipna, mask)

    if skipna and mask is not None:
        # Replace missing entries with the multiplicative identity.
        values = values.copy()
        values[mask] = 1
    raw = values.prod(axis)
    # error: Incompatible return value type (got "Union[ndarray, float]", expected
    # "float")
    return _maybe_null_out(  # type: ignore[return-value]
        raw, axis, mask, values.shape, min_count=min_count
    )
|
| 1429 |
+
|
| 1430 |
+
|
| 1431 |
+
def _maybe_arg_null_out(
|
| 1432 |
+
result: np.ndarray,
|
| 1433 |
+
axis: AxisInt | None,
|
| 1434 |
+
mask: npt.NDArray[np.bool_] | None,
|
| 1435 |
+
skipna: bool,
|
| 1436 |
+
) -> np.ndarray | int:
|
| 1437 |
+
# helper function for nanargmin/nanargmax
|
| 1438 |
+
if mask is None:
|
| 1439 |
+
return result
|
| 1440 |
+
|
| 1441 |
+
if axis is None or not getattr(result, "ndim", False):
|
| 1442 |
+
if skipna:
|
| 1443 |
+
if mask.all():
|
| 1444 |
+
return -1
|
| 1445 |
+
else:
|
| 1446 |
+
if mask.any():
|
| 1447 |
+
return -1
|
| 1448 |
+
else:
|
| 1449 |
+
if skipna:
|
| 1450 |
+
na_mask = mask.all(axis)
|
| 1451 |
+
else:
|
| 1452 |
+
na_mask = mask.any(axis)
|
| 1453 |
+
if na_mask.any():
|
| 1454 |
+
result[na_mask] = -1
|
| 1455 |
+
return result
|
| 1456 |
+
|
| 1457 |
+
|
| 1458 |
+
def _get_counts(
    values_shape: Shape,
    mask: npt.NDArray[np.bool_] | None,
    axis: AxisInt | None,
    dtype: np.dtype[np.floating] = np.dtype(np.float64),
) -> np.floating | npt.NDArray[np.floating]:
    """
    Get the count of non-null values along an axis

    Parameters
    ----------
    values_shape : tuple of int
        shape tuple from values ndarray, used if mask is None
    mask : Optional[ndarray[bool]]
        locations in values that should be considered missing
    axis : Optional[int]
        axis to count along
    dtype : type, optional
        type to use for count

    Returns
    -------
    count : scalar or array
    """
    if axis is None:
        # Reduce over everything: one grand total.
        total = np.prod(values_shape) if mask is None else mask.size - mask.sum()
        return dtype.type(total)

    if mask is None:
        # mask is None means no nulls (see _maybe_get_mask), so the count
        # along the axis is just that axis's length.
        count = values_shape[axis]
    else:
        count = mask.shape[axis] - mask.sum(axis)

    if is_integer(count):
        return dtype.type(count)
    return count.astype(dtype, copy=False)
|
| 1497 |
+
|
| 1498 |
+
|
| 1499 |
+
def _maybe_null_out(
    result: np.ndarray | float | NaTType,
    axis: AxisInt | None,
    mask: npt.NDArray[np.bool_] | None,
    shape: tuple[int, ...],
    min_count: int = 1,
) -> np.ndarray | float | NaTType:
    """
    Null out entries of ``result`` whose non-null count falls below
    ``min_count``.

    Returns
    -------
    Dtype
        The product of all elements on a given axis. ( NaNs are treated as 1)
    """
    if mask is None and min_count == 0:
        # nothing to check; short-circuit
        return result

    if axis is not None and isinstance(result, np.ndarray):
        if mask is not None:
            null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
        else:
            # we have no nulls, kept mask=None in _maybe_get_mask
            below_count = shape[axis] - min_count < 0
            reduced_shape = shape[:axis] + shape[axis + 1 :]
            null_mask = np.broadcast_to(below_count, reduced_shape)

        if np.any(null_mask):
            if is_numeric_dtype(result):
                # Promote so the array can actually hold NaN.
                if np.iscomplexobj(result):
                    result = result.astype("c16")
                elif not is_float_dtype(result):
                    result = result.astype("f8", copy=False)
                result[null_mask] = np.nan
            else:
                # GH12941, use None to auto cast null
                result[null_mask] = None
    elif result is not NaT:
        if check_below_min_count(shape, mask, min_count):
            result_dtype = getattr(result, "dtype", None)
            if is_float_dtype(result_dtype):
                # error: Item "None" of "Optional[Any]" has no attribute "type"
                result = result_dtype.type("nan")  # type: ignore[union-attr]
            else:
                result = np.nan

    return result
|
| 1545 |
+
|
| 1546 |
+
|
| 1547 |
+
def check_below_min_count(
    shape: tuple[int, ...], mask: npt.NDArray[np.bool_] | None, min_count: int
) -> bool:
    """
    Check for the `min_count` keyword. Returns True if below `min_count` (when
    missing value should be returned from the reduction).

    Parameters
    ----------
    shape : tuple
        The shape of the values (`values.shape`).
    mask : ndarray[bool] or None
        Boolean numpy array (typically of same shape as `shape`) or None.
    min_count : int
        Keyword passed through from sum/prod call.

    Returns
    -------
    bool
    """
    if min_count <= 0:
        # min_count disabled — never below.
        return False
    # mask is None means no missing values, so the element count suffices.
    non_nulls = np.prod(shape) if mask is None else mask.size - mask.sum()
    return bool(non_nulls < min_count)
|
| 1576 |
+
|
| 1577 |
+
|
| 1578 |
+
def _zero_out_fperr(arg):
|
| 1579 |
+
# #18044 reference this behavior to fix rolling skew/kurt issue
|
| 1580 |
+
if isinstance(arg, np.ndarray):
|
| 1581 |
+
return np.where(np.abs(arg) < 1e-14, 0, arg)
|
| 1582 |
+
else:
|
| 1583 |
+
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
|
| 1584 |
+
|
| 1585 |
+
|
| 1586 |
+
@disallow("M8", "m8")
def nancorr(
    a: np.ndarray,
    b: np.ndarray,
    *,
    method: CorrelationMethod = "pearson",
    min_periods: int | None = None,
) -> float:
    """
    Correlation of two 1-D arrays after pairwise-dropping missing entries.

    a, b: ndarrays
    """
    if len(a) != len(b):
        raise AssertionError("Operands to nancorr must have same size")

    if min_periods is None:
        min_periods = 1

    # Keep only positions where both operands are non-null.
    valid = notna(a) & notna(b)
    if not valid.all():
        a = a[valid]
        b = b[valid]

    if len(a) < min_periods:
        return np.nan

    a = _ensure_numeric(a)
    b = _ensure_numeric(b)

    corr = get_corr_func(method)
    return corr(a, b)
|
| 1616 |
+
|
| 1617 |
+
|
| 1618 |
+
def get_corr_func(
    method: CorrelationMethod,
) -> Callable[[np.ndarray, np.ndarray], float]:
    """
    Resolve a correlation method name (or callable) to a two-array function.

    scipy is imported lazily, only for the methods that need it.
    """
    if callable(method):
        return method
    if method == "kendall":
        from scipy.stats import kendalltau

        def func(a, b):
            return kendalltau(a, b)[0]

        return func
    if method == "spearman":
        from scipy.stats import spearmanr

        def func(a, b):
            return spearmanr(a, b)[0]

        return func
    if method == "pearson":

        def func(a, b):
            return np.corrcoef(a, b)[0, 1]

        return func

    raise ValueError(
        f"Unknown method '{method}', expected one of "
        "'kendall', 'spearman', 'pearson', or callable"
    )
|
| 1648 |
+
|
| 1649 |
+
|
| 1650 |
+
@disallow("M8", "m8")
def nancov(
    a: np.ndarray,
    b: np.ndarray,
    *,
    min_periods: int | None = None,
    ddof: int | None = 1,
) -> float:
    """
    Covariance of two 1-D arrays after pairwise-dropping missing entries.

    Returns NaN when fewer than ``min_periods`` valid pairs remain.
    """
    if len(a) != len(b):
        raise AssertionError("Operands to nancov must have same size")

    if min_periods is None:
        min_periods = 1

    # Keep only positions where both operands are non-null.
    valid = notna(a) & notna(b)
    if not valid.all():
        a = a[valid]
        b = b[valid]

    if len(a) < min_periods:
        return np.nan

    a = _ensure_numeric(a)
    b = _ensure_numeric(b)

    # Off-diagonal entry of the 2x2 covariance matrix.
    return np.cov(a, b, ddof=ddof)[0, 1]
|
| 1676 |
+
|
| 1677 |
+
|
| 1678 |
+
def _ensure_numeric(x):
    """
    Coerce an array or scalar to a numeric type, raising TypeError for
    values (e.g. strings) that cannot be meaningfully converted.
    """
    if isinstance(x, np.ndarray):
        if x.dtype.kind in "biu":
            x = x.astype(np.float64)
        elif x.dtype == object:
            inferred = lib.infer_dtype(x)
            if inferred in ["string", "mixed"]:
                # GH#44008, GH#36703 avoid casting e.g. strings to numeric
                raise TypeError(f"Could not convert {x} to numeric")
            try:
                x = x.astype(np.complex128)
            except (TypeError, ValueError):
                try:
                    x = x.astype(np.float64)
                except ValueError as err:
                    # GH#29941 we get here with object arrays containing strs
                    raise TypeError(f"Could not convert {x} to numeric") from err
            else:
                # Purely-real complex result: drop the imaginary part.
                if not np.any(np.imag(x)):
                    x = x.real
    elif not (is_float(x) or is_integer(x) or is_complex(x)):
        # Scalar that is not already numeric.
        if isinstance(x, str):
            # GH#44008, GH#36703 avoid casting e.g. strings to numeric
            raise TypeError(f"Could not convert string '{x}' to numeric")
        try:
            x = float(x)
        except (TypeError, ValueError):
            # e.g. "1+1j" or "foo"
            try:
                x = complex(x)
            except ValueError as err:
                # e.g. "foo"
                raise TypeError(f"Could not convert {x} to numeric") from err
    return x
|
| 1712 |
+
|
| 1713 |
+
|
| 1714 |
+
def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
    """
    Cumulative function with skipna support.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate}
    skipna : bool

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    # For each accumulator: the neutral value to substitute at NA positions
    # before accumulating, and the value to restore there afterwards.
    fill_before, fill_after = {
        np.cumprod: (1.0, np.nan),
        np.maximum.accumulate: (-np.inf, np.nan),
        np.cumsum: (0.0, np.nan),
        np.minimum.accumulate: (np.inf, np.nan),
    }[accum_func]

    # This should go through ea interface
    assert values.dtype.kind not in "mM"

    # We will be applying this function to block values
    if skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)):
        vals = values.copy()
        na_positions = isna(vals)
        vals[na_positions] = fill_before
        result = accum_func(vals, axis=0)
        result[na_positions] = fill_after
    else:
        # integer/bool dtypes cannot hold NA; accumulate directly.
        result = accum_func(values, axis=0)

    return result
|
videollama2/lib/python3.10/site-packages/pandas/core/resample.py
ADDED
|
@@ -0,0 +1,2920 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import copy
|
| 4 |
+
from textwrap import dedent
|
| 5 |
+
from typing import (
|
| 6 |
+
TYPE_CHECKING,
|
| 7 |
+
Callable,
|
| 8 |
+
Literal,
|
| 9 |
+
cast,
|
| 10 |
+
final,
|
| 11 |
+
no_type_check,
|
| 12 |
+
)
|
| 13 |
+
import warnings
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from pandas._libs import lib
|
| 18 |
+
from pandas._libs.tslibs import (
|
| 19 |
+
BaseOffset,
|
| 20 |
+
IncompatibleFrequency,
|
| 21 |
+
NaT,
|
| 22 |
+
Period,
|
| 23 |
+
Timedelta,
|
| 24 |
+
Timestamp,
|
| 25 |
+
to_offset,
|
| 26 |
+
)
|
| 27 |
+
from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
|
| 28 |
+
from pandas._typing import NDFrameT
|
| 29 |
+
from pandas.compat.numpy import function as nv
|
| 30 |
+
from pandas.errors import AbstractMethodError
|
| 31 |
+
from pandas.util._decorators import (
|
| 32 |
+
Appender,
|
| 33 |
+
Substitution,
|
| 34 |
+
doc,
|
| 35 |
+
)
|
| 36 |
+
from pandas.util._exceptions import (
|
| 37 |
+
find_stack_level,
|
| 38 |
+
rewrite_warning,
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
from pandas.core.dtypes.dtypes import ArrowDtype
|
| 42 |
+
from pandas.core.dtypes.generic import (
|
| 43 |
+
ABCDataFrame,
|
| 44 |
+
ABCSeries,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
import pandas.core.algorithms as algos
|
| 48 |
+
from pandas.core.apply import (
|
| 49 |
+
ResamplerWindowApply,
|
| 50 |
+
warn_alias_replacement,
|
| 51 |
+
)
|
| 52 |
+
from pandas.core.arrays import ArrowExtensionArray
|
| 53 |
+
from pandas.core.base import (
|
| 54 |
+
PandasObject,
|
| 55 |
+
SelectionMixin,
|
| 56 |
+
)
|
| 57 |
+
import pandas.core.common as com
|
| 58 |
+
from pandas.core.generic import (
|
| 59 |
+
NDFrame,
|
| 60 |
+
_shared_docs,
|
| 61 |
+
)
|
| 62 |
+
from pandas.core.groupby.generic import SeriesGroupBy
|
| 63 |
+
from pandas.core.groupby.groupby import (
|
| 64 |
+
BaseGroupBy,
|
| 65 |
+
GroupBy,
|
| 66 |
+
_apply_groupings_depr,
|
| 67 |
+
_pipe_template,
|
| 68 |
+
get_groupby,
|
| 69 |
+
)
|
| 70 |
+
from pandas.core.groupby.grouper import Grouper
|
| 71 |
+
from pandas.core.groupby.ops import BinGrouper
|
| 72 |
+
from pandas.core.indexes.api import MultiIndex
|
| 73 |
+
from pandas.core.indexes.base import Index
|
| 74 |
+
from pandas.core.indexes.datetimes import (
|
| 75 |
+
DatetimeIndex,
|
| 76 |
+
date_range,
|
| 77 |
+
)
|
| 78 |
+
from pandas.core.indexes.period import (
|
| 79 |
+
PeriodIndex,
|
| 80 |
+
period_range,
|
| 81 |
+
)
|
| 82 |
+
from pandas.core.indexes.timedeltas import (
|
| 83 |
+
TimedeltaIndex,
|
| 84 |
+
timedelta_range,
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
from pandas.tseries.frequencies import (
|
| 88 |
+
is_subperiod,
|
| 89 |
+
is_superperiod,
|
| 90 |
+
)
|
| 91 |
+
from pandas.tseries.offsets import (
|
| 92 |
+
Day,
|
| 93 |
+
Tick,
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
if TYPE_CHECKING:
|
| 97 |
+
from collections.abc import Hashable
|
| 98 |
+
|
| 99 |
+
from pandas._typing import (
|
| 100 |
+
AnyArrayLike,
|
| 101 |
+
Axis,
|
| 102 |
+
AxisInt,
|
| 103 |
+
Frequency,
|
| 104 |
+
IndexLabel,
|
| 105 |
+
InterpolateOptions,
|
| 106 |
+
T,
|
| 107 |
+
TimedeltaConvertibleTypes,
|
| 108 |
+
TimeGrouperOrigin,
|
| 109 |
+
TimestampConvertibleTypes,
|
| 110 |
+
npt,
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
from pandas import (
|
| 114 |
+
DataFrame,
|
| 115 |
+
Series,
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
# Substitution mapping for this module's shared docstring templates
# (currently no module-level substitutions are registered).
_shared_docs_kwargs: dict[str, str] = {}
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class Resampler(BaseGroupBy, PandasObject):
|
| 122 |
+
"""
|
| 123 |
+
Class for resampling datetimelike data, a groupby-like operation.
|
| 124 |
+
See aggregate, transform, and apply functions on this object.
|
| 125 |
+
|
| 126 |
+
It's easiest to use obj.resample(...) to use Resampler.
|
| 127 |
+
|
| 128 |
+
Parameters
|
| 129 |
+
----------
|
| 130 |
+
obj : Series or DataFrame
|
| 131 |
+
groupby : TimeGrouper
|
| 132 |
+
axis : int, default 0
|
| 133 |
+
kind : str or None
|
| 134 |
+
'period', 'timestamp' to override default index treatment
|
| 135 |
+
|
| 136 |
+
Returns
|
| 137 |
+
-------
|
| 138 |
+
a Resampler of the appropriate type
|
| 139 |
+
|
| 140 |
+
Notes
|
| 141 |
+
-----
|
| 142 |
+
After resampling, see aggregate, apply, and transform functions.
|
| 143 |
+
"""
|
| 144 |
+
|
| 145 |
+
_grouper: BinGrouper
|
| 146 |
+
_timegrouper: TimeGrouper
|
| 147 |
+
binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass
|
| 148 |
+
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
|
| 149 |
+
_internal_names_set = set({"obj", "ax", "_indexer"})
|
| 150 |
+
|
| 151 |
+
# to the groupby descriptor
|
| 152 |
+
_attributes = [
|
| 153 |
+
"freq",
|
| 154 |
+
"axis",
|
| 155 |
+
"closed",
|
| 156 |
+
"label",
|
| 157 |
+
"convention",
|
| 158 |
+
"kind",
|
| 159 |
+
"origin",
|
| 160 |
+
"offset",
|
| 161 |
+
]
|
| 162 |
+
|
| 163 |
+
def __init__(
|
| 164 |
+
self,
|
| 165 |
+
obj: NDFrame,
|
| 166 |
+
timegrouper: TimeGrouper,
|
| 167 |
+
axis: Axis = 0,
|
| 168 |
+
kind=None,
|
| 169 |
+
*,
|
| 170 |
+
gpr_index: Index,
|
| 171 |
+
group_keys: bool = False,
|
| 172 |
+
selection=None,
|
| 173 |
+
include_groups: bool = True,
|
| 174 |
+
) -> None:
|
| 175 |
+
self._timegrouper = timegrouper
|
| 176 |
+
self.keys = None
|
| 177 |
+
self.sort = True
|
| 178 |
+
self.axis = obj._get_axis_number(axis)
|
| 179 |
+
self.kind = kind
|
| 180 |
+
self.group_keys = group_keys
|
| 181 |
+
self.as_index = True
|
| 182 |
+
self.include_groups = include_groups
|
| 183 |
+
|
| 184 |
+
self.obj, self.ax, self._indexer = self._timegrouper._set_grouper(
|
| 185 |
+
self._convert_obj(obj), sort=True, gpr_index=gpr_index
|
| 186 |
+
)
|
| 187 |
+
self.binner, self._grouper = self._get_binner()
|
| 188 |
+
self._selection = selection
|
| 189 |
+
if self._timegrouper.key is not None:
|
| 190 |
+
self.exclusions = frozenset([self._timegrouper.key])
|
| 191 |
+
else:
|
| 192 |
+
self.exclusions = frozenset()
|
| 193 |
+
|
| 194 |
+
@final
|
| 195 |
+
def __str__(self) -> str:
|
| 196 |
+
"""
|
| 197 |
+
Provide a nice str repr of our rolling object.
|
| 198 |
+
"""
|
| 199 |
+
attrs = (
|
| 200 |
+
f"{k}={getattr(self._timegrouper, k)}"
|
| 201 |
+
for k in self._attributes
|
| 202 |
+
if getattr(self._timegrouper, k, None) is not None
|
| 203 |
+
)
|
| 204 |
+
return f"{type(self).__name__} [{', '.join(attrs)}]"
|
| 205 |
+
|
| 206 |
+
@final
|
| 207 |
+
def __getattr__(self, attr: str):
|
| 208 |
+
if attr in self._internal_names_set:
|
| 209 |
+
return object.__getattribute__(self, attr)
|
| 210 |
+
if attr in self._attributes:
|
| 211 |
+
return getattr(self._timegrouper, attr)
|
| 212 |
+
if attr in self.obj:
|
| 213 |
+
return self[attr]
|
| 214 |
+
|
| 215 |
+
return object.__getattribute__(self, attr)
|
| 216 |
+
|
| 217 |
+
@final
|
| 218 |
+
@property
|
| 219 |
+
def _from_selection(self) -> bool:
|
| 220 |
+
"""
|
| 221 |
+
Is the resampling from a DataFrame column or MultiIndex level.
|
| 222 |
+
"""
|
| 223 |
+
# upsampling and PeriodIndex resampling do not work
|
| 224 |
+
# with selection, this state used to catch and raise an error
|
| 225 |
+
return self._timegrouper is not None and (
|
| 226 |
+
self._timegrouper.key is not None or self._timegrouper.level is not None
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
|
| 230 |
+
"""
|
| 231 |
+
Provide any conversions for the object in order to correctly handle.
|
| 232 |
+
|
| 233 |
+
Parameters
|
| 234 |
+
----------
|
| 235 |
+
obj : Series or DataFrame
|
| 236 |
+
|
| 237 |
+
Returns
|
| 238 |
+
-------
|
| 239 |
+
Series or DataFrame
|
| 240 |
+
"""
|
| 241 |
+
return obj._consolidate()
|
| 242 |
+
|
| 243 |
+
def _get_binner_for_time(self):
|
| 244 |
+
raise AbstractMethodError(self)
|
| 245 |
+
|
| 246 |
+
@final
|
| 247 |
+
def _get_binner(self):
|
| 248 |
+
"""
|
| 249 |
+
Create the BinGrouper, assume that self.set_grouper(obj)
|
| 250 |
+
has already been called.
|
| 251 |
+
"""
|
| 252 |
+
binner, bins, binlabels = self._get_binner_for_time()
|
| 253 |
+
assert len(bins) == len(binlabels)
|
| 254 |
+
bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer)
|
| 255 |
+
return binner, bin_grouper
|
| 256 |
+
|
| 257 |
+
@final
|
| 258 |
+
@Substitution(
|
| 259 |
+
klass="Resampler",
|
| 260 |
+
examples="""
|
| 261 |
+
>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
|
| 262 |
+
... index=pd.date_range('2012-08-02', periods=4))
|
| 263 |
+
>>> df
|
| 264 |
+
A
|
| 265 |
+
2012-08-02 1
|
| 266 |
+
2012-08-03 2
|
| 267 |
+
2012-08-04 3
|
| 268 |
+
2012-08-05 4
|
| 269 |
+
|
| 270 |
+
To get the difference between each 2-day period's maximum and minimum
|
| 271 |
+
value in one pass, you can do
|
| 272 |
+
|
| 273 |
+
>>> df.resample('2D').pipe(lambda x: x.max() - x.min())
|
| 274 |
+
A
|
| 275 |
+
2012-08-02 1
|
| 276 |
+
2012-08-04 1""",
|
| 277 |
+
)
|
| 278 |
+
@Appender(_pipe_template)
|
| 279 |
+
def pipe(
|
| 280 |
+
self,
|
| 281 |
+
func: Callable[..., T] | tuple[Callable[..., T], str],
|
| 282 |
+
*args,
|
| 283 |
+
**kwargs,
|
| 284 |
+
) -> T:
|
| 285 |
+
return super().pipe(func, *args, **kwargs)
|
| 286 |
+
|
| 287 |
+
_agg_see_also_doc = dedent(
|
| 288 |
+
"""
|
| 289 |
+
See Also
|
| 290 |
+
--------
|
| 291 |
+
DataFrame.groupby.aggregate : Aggregate using callable, string, dict,
|
| 292 |
+
or list of string/callables.
|
| 293 |
+
DataFrame.resample.transform : Transforms the Series on each group
|
| 294 |
+
based on the given function.
|
| 295 |
+
DataFrame.aggregate: Aggregate using one or more
|
| 296 |
+
operations over the specified axis.
|
| 297 |
+
"""
|
| 298 |
+
)
|
| 299 |
+
|
| 300 |
+
_agg_examples_doc = dedent(
|
| 301 |
+
"""
|
| 302 |
+
Examples
|
| 303 |
+
--------
|
| 304 |
+
>>> s = pd.Series([1, 2, 3, 4, 5],
|
| 305 |
+
... index=pd.date_range('20130101', periods=5, freq='s'))
|
| 306 |
+
>>> s
|
| 307 |
+
2013-01-01 00:00:00 1
|
| 308 |
+
2013-01-01 00:00:01 2
|
| 309 |
+
2013-01-01 00:00:02 3
|
| 310 |
+
2013-01-01 00:00:03 4
|
| 311 |
+
2013-01-01 00:00:04 5
|
| 312 |
+
Freq: s, dtype: int64
|
| 313 |
+
|
| 314 |
+
>>> r = s.resample('2s')
|
| 315 |
+
|
| 316 |
+
>>> r.agg("sum")
|
| 317 |
+
2013-01-01 00:00:00 3
|
| 318 |
+
2013-01-01 00:00:02 7
|
| 319 |
+
2013-01-01 00:00:04 5
|
| 320 |
+
Freq: 2s, dtype: int64
|
| 321 |
+
|
| 322 |
+
>>> r.agg(['sum', 'mean', 'max'])
|
| 323 |
+
sum mean max
|
| 324 |
+
2013-01-01 00:00:00 3 1.5 2
|
| 325 |
+
2013-01-01 00:00:02 7 3.5 4
|
| 326 |
+
2013-01-01 00:00:04 5 5.0 5
|
| 327 |
+
|
| 328 |
+
>>> r.agg({'result': lambda x: x.mean() / x.std(),
|
| 329 |
+
... 'total': "sum"})
|
| 330 |
+
result total
|
| 331 |
+
2013-01-01 00:00:00 2.121320 3
|
| 332 |
+
2013-01-01 00:00:02 4.949747 7
|
| 333 |
+
2013-01-01 00:00:04 NaN 5
|
| 334 |
+
|
| 335 |
+
>>> r.agg(average="mean", total="sum")
|
| 336 |
+
average total
|
| 337 |
+
2013-01-01 00:00:00 1.5 3
|
| 338 |
+
2013-01-01 00:00:02 3.5 7
|
| 339 |
+
2013-01-01 00:00:04 5.0 5
|
| 340 |
+
"""
|
| 341 |
+
)
|
| 342 |
+
|
| 343 |
+
@final
|
| 344 |
+
@doc(
|
| 345 |
+
_shared_docs["aggregate"],
|
| 346 |
+
see_also=_agg_see_also_doc,
|
| 347 |
+
examples=_agg_examples_doc,
|
| 348 |
+
klass="DataFrame",
|
| 349 |
+
axis="",
|
| 350 |
+
)
|
| 351 |
+
def aggregate(self, func=None, *args, **kwargs):
|
| 352 |
+
result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
|
| 353 |
+
if result is None:
|
| 354 |
+
how = func
|
| 355 |
+
result = self._groupby_and_aggregate(how, *args, **kwargs)
|
| 356 |
+
|
| 357 |
+
return result
|
| 358 |
+
|
| 359 |
+
agg = aggregate
|
| 360 |
+
apply = aggregate
|
| 361 |
+
|
| 362 |
+
@final
|
| 363 |
+
def transform(self, arg, *args, **kwargs):
|
| 364 |
+
"""
|
| 365 |
+
Call function producing a like-indexed Series on each group.
|
| 366 |
+
|
| 367 |
+
Return a Series with the transformed values.
|
| 368 |
+
|
| 369 |
+
Parameters
|
| 370 |
+
----------
|
| 371 |
+
arg : function
|
| 372 |
+
To apply to each group. Should return a Series with the same index.
|
| 373 |
+
|
| 374 |
+
Returns
|
| 375 |
+
-------
|
| 376 |
+
Series
|
| 377 |
+
|
| 378 |
+
Examples
|
| 379 |
+
--------
|
| 380 |
+
>>> s = pd.Series([1, 2],
|
| 381 |
+
... index=pd.date_range('20180101',
|
| 382 |
+
... periods=2,
|
| 383 |
+
... freq='1h'))
|
| 384 |
+
>>> s
|
| 385 |
+
2018-01-01 00:00:00 1
|
| 386 |
+
2018-01-01 01:00:00 2
|
| 387 |
+
Freq: h, dtype: int64
|
| 388 |
+
|
| 389 |
+
>>> resampled = s.resample('15min')
|
| 390 |
+
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
|
| 391 |
+
2018-01-01 00:00:00 NaN
|
| 392 |
+
2018-01-01 01:00:00 NaN
|
| 393 |
+
Freq: h, dtype: float64
|
| 394 |
+
"""
|
| 395 |
+
return self._selected_obj.groupby(self._timegrouper).transform(
|
| 396 |
+
arg, *args, **kwargs
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
def _downsample(self, f, **kwargs):
|
| 400 |
+
raise AbstractMethodError(self)
|
| 401 |
+
|
| 402 |
+
def _upsample(self, f, limit: int | None = None, fill_value=None):
|
| 403 |
+
raise AbstractMethodError(self)
|
| 404 |
+
|
| 405 |
+
def _gotitem(self, key, ndim: int, subset=None):
|
| 406 |
+
"""
|
| 407 |
+
Sub-classes to define. Return a sliced object.
|
| 408 |
+
|
| 409 |
+
Parameters
|
| 410 |
+
----------
|
| 411 |
+
key : string / list of selections
|
| 412 |
+
ndim : {1, 2}
|
| 413 |
+
requested ndim of result
|
| 414 |
+
subset : object, default None
|
| 415 |
+
subset to act on
|
| 416 |
+
"""
|
| 417 |
+
grouper = self._grouper
|
| 418 |
+
if subset is None:
|
| 419 |
+
subset = self.obj
|
| 420 |
+
if key is not None:
|
| 421 |
+
subset = subset[key]
|
| 422 |
+
else:
|
| 423 |
+
# reached via Apply.agg_dict_like with selection=None and ndim=1
|
| 424 |
+
assert subset.ndim == 1
|
| 425 |
+
if ndim == 1:
|
| 426 |
+
assert subset.ndim == 1
|
| 427 |
+
|
| 428 |
+
grouped = get_groupby(
|
| 429 |
+
subset, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
|
| 430 |
+
)
|
| 431 |
+
return grouped
|
| 432 |
+
|
| 433 |
+
def _groupby_and_aggregate(self, how, *args, **kwargs):
|
| 434 |
+
"""
|
| 435 |
+
Re-evaluate the obj with a groupby aggregation.
|
| 436 |
+
"""
|
| 437 |
+
grouper = self._grouper
|
| 438 |
+
|
| 439 |
+
# Excludes `on` column when provided
|
| 440 |
+
obj = self._obj_with_exclusions
|
| 441 |
+
|
| 442 |
+
grouped = get_groupby(
|
| 443 |
+
obj, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
|
| 444 |
+
)
|
| 445 |
+
|
| 446 |
+
try:
|
| 447 |
+
if callable(how):
|
| 448 |
+
# TODO: test_resample_apply_with_additional_args fails if we go
|
| 449 |
+
# through the non-lambda path, not clear that it should.
|
| 450 |
+
func = lambda x: how(x, *args, **kwargs)
|
| 451 |
+
result = grouped.aggregate(func)
|
| 452 |
+
else:
|
| 453 |
+
result = grouped.aggregate(how, *args, **kwargs)
|
| 454 |
+
except (AttributeError, KeyError):
|
| 455 |
+
# we have a non-reducing function; try to evaluate
|
| 456 |
+
# alternatively we want to evaluate only a column of the input
|
| 457 |
+
|
| 458 |
+
# test_apply_to_one_column_of_df the function being applied references
|
| 459 |
+
# a DataFrame column, but aggregate_item_by_item operates column-wise
|
| 460 |
+
# on Series, raising AttributeError or KeyError
|
| 461 |
+
# (depending on whether the column lookup uses getattr/__getitem__)
|
| 462 |
+
result = _apply(
|
| 463 |
+
grouped, how, *args, include_groups=self.include_groups, **kwargs
|
| 464 |
+
)
|
| 465 |
+
|
| 466 |
+
except ValueError as err:
|
| 467 |
+
if "Must produce aggregated value" in str(err):
|
| 468 |
+
# raised in _aggregate_named
|
| 469 |
+
# see test_apply_without_aggregation, test_apply_with_mutated_index
|
| 470 |
+
pass
|
| 471 |
+
else:
|
| 472 |
+
raise
|
| 473 |
+
|
| 474 |
+
# we have a non-reducing function
|
| 475 |
+
# try to evaluate
|
| 476 |
+
result = _apply(
|
| 477 |
+
grouped, how, *args, include_groups=self.include_groups, **kwargs
|
| 478 |
+
)
|
| 479 |
+
|
| 480 |
+
return self._wrap_result(result)
|
| 481 |
+
|
| 482 |
+
@final
|
| 483 |
+
def _get_resampler_for_grouping(
|
| 484 |
+
self, groupby: GroupBy, key, include_groups: bool = True
|
| 485 |
+
):
|
| 486 |
+
"""
|
| 487 |
+
Return the correct class for resampling with groupby.
|
| 488 |
+
"""
|
| 489 |
+
return self._resampler_for_grouping(
|
| 490 |
+
groupby=groupby, key=key, parent=self, include_groups=include_groups
|
| 491 |
+
)
|
| 492 |
+
|
| 493 |
+
def _wrap_result(self, result):
|
| 494 |
+
"""
|
| 495 |
+
Potentially wrap any results.
|
| 496 |
+
"""
|
| 497 |
+
# GH 47705
|
| 498 |
+
obj = self.obj
|
| 499 |
+
if (
|
| 500 |
+
isinstance(result, ABCDataFrame)
|
| 501 |
+
and len(result) == 0
|
| 502 |
+
and not isinstance(result.index, PeriodIndex)
|
| 503 |
+
):
|
| 504 |
+
result = result.set_index(
|
| 505 |
+
_asfreq_compat(obj.index[:0], freq=self.freq), append=True
|
| 506 |
+
)
|
| 507 |
+
|
| 508 |
+
if isinstance(result, ABCSeries) and self._selection is not None:
|
| 509 |
+
result.name = self._selection
|
| 510 |
+
|
| 511 |
+
if isinstance(result, ABCSeries) and result.empty:
|
| 512 |
+
# When index is all NaT, result is empty but index is not
|
| 513 |
+
result.index = _asfreq_compat(obj.index[:0], freq=self.freq)
|
| 514 |
+
result.name = getattr(obj, "name", None)
|
| 515 |
+
|
| 516 |
+
if self._timegrouper._arrow_dtype is not None:
|
| 517 |
+
result.index = result.index.astype(self._timegrouper._arrow_dtype)
|
| 518 |
+
|
| 519 |
+
return result
|
| 520 |
+
|
| 521 |
+
@final
|
| 522 |
+
def ffill(self, limit: int | None = None):
|
| 523 |
+
"""
|
| 524 |
+
Forward fill the values.
|
| 525 |
+
|
| 526 |
+
Parameters
|
| 527 |
+
----------
|
| 528 |
+
limit : int, optional
|
| 529 |
+
Limit of how many values to fill.
|
| 530 |
+
|
| 531 |
+
Returns
|
| 532 |
+
-------
|
| 533 |
+
An upsampled Series.
|
| 534 |
+
|
| 535 |
+
See Also
|
| 536 |
+
--------
|
| 537 |
+
Series.fillna: Fill NA/NaN values using the specified method.
|
| 538 |
+
DataFrame.fillna: Fill NA/NaN values using the specified method.
|
| 539 |
+
|
| 540 |
+
Examples
|
| 541 |
+
--------
|
| 542 |
+
Here we only create a ``Series``.
|
| 543 |
+
|
| 544 |
+
>>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
|
| 545 |
+
... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
|
| 546 |
+
>>> ser
|
| 547 |
+
2023-01-01 1
|
| 548 |
+
2023-01-15 2
|
| 549 |
+
2023-02-01 3
|
| 550 |
+
2023-02-15 4
|
| 551 |
+
dtype: int64
|
| 552 |
+
|
| 553 |
+
Example for ``ffill`` with downsampling (we have fewer dates after resampling):
|
| 554 |
+
|
| 555 |
+
>>> ser.resample('MS').ffill()
|
| 556 |
+
2023-01-01 1
|
| 557 |
+
2023-02-01 3
|
| 558 |
+
Freq: MS, dtype: int64
|
| 559 |
+
|
| 560 |
+
Example for ``ffill`` with upsampling (fill the new dates with
|
| 561 |
+
the previous value):
|
| 562 |
+
|
| 563 |
+
>>> ser.resample('W').ffill()
|
| 564 |
+
2023-01-01 1
|
| 565 |
+
2023-01-08 1
|
| 566 |
+
2023-01-15 2
|
| 567 |
+
2023-01-22 2
|
| 568 |
+
2023-01-29 2
|
| 569 |
+
2023-02-05 3
|
| 570 |
+
2023-02-12 3
|
| 571 |
+
2023-02-19 4
|
| 572 |
+
Freq: W-SUN, dtype: int64
|
| 573 |
+
|
| 574 |
+
With upsampling and limiting (only fill the first new date with the
|
| 575 |
+
previous value):
|
| 576 |
+
|
| 577 |
+
>>> ser.resample('W').ffill(limit=1)
|
| 578 |
+
2023-01-01 1.0
|
| 579 |
+
2023-01-08 1.0
|
| 580 |
+
2023-01-15 2.0
|
| 581 |
+
2023-01-22 2.0
|
| 582 |
+
2023-01-29 NaN
|
| 583 |
+
2023-02-05 3.0
|
| 584 |
+
2023-02-12 NaN
|
| 585 |
+
2023-02-19 4.0
|
| 586 |
+
Freq: W-SUN, dtype: float64
|
| 587 |
+
"""
|
| 588 |
+
return self._upsample("ffill", limit=limit)
|
| 589 |
+
|
| 590 |
+
@final
|
| 591 |
+
def nearest(self, limit: int | None = None):
|
| 592 |
+
"""
|
| 593 |
+
Resample by using the nearest value.
|
| 594 |
+
|
| 595 |
+
When resampling data, missing values may appear (e.g., when the
|
| 596 |
+
resampling frequency is higher than the original frequency).
|
| 597 |
+
The `nearest` method will replace ``NaN`` values that appeared in
|
| 598 |
+
the resampled data with the value from the nearest member of the
|
| 599 |
+
sequence, based on the index value.
|
| 600 |
+
Missing values that existed in the original data will not be modified.
|
| 601 |
+
If `limit` is given, fill only this many values in each direction for
|
| 602 |
+
each of the original values.
|
| 603 |
+
|
| 604 |
+
Parameters
|
| 605 |
+
----------
|
| 606 |
+
limit : int, optional
|
| 607 |
+
Limit of how many values to fill.
|
| 608 |
+
|
| 609 |
+
Returns
|
| 610 |
+
-------
|
| 611 |
+
Series or DataFrame
|
| 612 |
+
An upsampled Series or DataFrame with ``NaN`` values filled with
|
| 613 |
+
their nearest value.
|
| 614 |
+
|
| 615 |
+
See Also
|
| 616 |
+
--------
|
| 617 |
+
backfill : Backward fill the new missing values in the resampled data.
|
| 618 |
+
pad : Forward fill ``NaN`` values.
|
| 619 |
+
|
| 620 |
+
Examples
|
| 621 |
+
--------
|
| 622 |
+
>>> s = pd.Series([1, 2],
|
| 623 |
+
... index=pd.date_range('20180101',
|
| 624 |
+
... periods=2,
|
| 625 |
+
... freq='1h'))
|
| 626 |
+
>>> s
|
| 627 |
+
2018-01-01 00:00:00 1
|
| 628 |
+
2018-01-01 01:00:00 2
|
| 629 |
+
Freq: h, dtype: int64
|
| 630 |
+
|
| 631 |
+
>>> s.resample('15min').nearest()
|
| 632 |
+
2018-01-01 00:00:00 1
|
| 633 |
+
2018-01-01 00:15:00 1
|
| 634 |
+
2018-01-01 00:30:00 2
|
| 635 |
+
2018-01-01 00:45:00 2
|
| 636 |
+
2018-01-01 01:00:00 2
|
| 637 |
+
Freq: 15min, dtype: int64
|
| 638 |
+
|
| 639 |
+
Limit the number of upsampled values imputed by the nearest:
|
| 640 |
+
|
| 641 |
+
>>> s.resample('15min').nearest(limit=1)
|
| 642 |
+
2018-01-01 00:00:00 1.0
|
| 643 |
+
2018-01-01 00:15:00 1.0
|
| 644 |
+
2018-01-01 00:30:00 NaN
|
| 645 |
+
2018-01-01 00:45:00 2.0
|
| 646 |
+
2018-01-01 01:00:00 2.0
|
| 647 |
+
Freq: 15min, dtype: float64
|
| 648 |
+
"""
|
| 649 |
+
return self._upsample("nearest", limit=limit)
|
| 650 |
+
|
| 651 |
+
@final
|
| 652 |
+
def bfill(self, limit: int | None = None):
|
| 653 |
+
"""
|
| 654 |
+
Backward fill the new missing values in the resampled data.
|
| 655 |
+
|
| 656 |
+
In statistics, imputation is the process of replacing missing data with
|
| 657 |
+
substituted values [1]_. When resampling data, missing values may
|
| 658 |
+
appear (e.g., when the resampling frequency is higher than the original
|
| 659 |
+
frequency). The backward fill will replace NaN values that appeared in
|
| 660 |
+
the resampled data with the next value in the original sequence.
|
| 661 |
+
Missing values that existed in the original data will not be modified.
|
| 662 |
+
|
| 663 |
+
Parameters
|
| 664 |
+
----------
|
| 665 |
+
limit : int, optional
|
| 666 |
+
Limit of how many values to fill.
|
| 667 |
+
|
| 668 |
+
Returns
|
| 669 |
+
-------
|
| 670 |
+
Series, DataFrame
|
| 671 |
+
An upsampled Series or DataFrame with backward filled NaN values.
|
| 672 |
+
|
| 673 |
+
See Also
|
| 674 |
+
--------
|
| 675 |
+
bfill : Alias of backfill.
|
| 676 |
+
fillna : Fill NaN values using the specified method, which can be
|
| 677 |
+
'backfill'.
|
| 678 |
+
nearest : Fill NaN values with nearest neighbor starting from center.
|
| 679 |
+
ffill : Forward fill NaN values.
|
| 680 |
+
Series.fillna : Fill NaN values in the Series using the
|
| 681 |
+
specified method, which can be 'backfill'.
|
| 682 |
+
DataFrame.fillna : Fill NaN values in the DataFrame using the
|
| 683 |
+
specified method, which can be 'backfill'.
|
| 684 |
+
|
| 685 |
+
References
|
| 686 |
+
----------
|
| 687 |
+
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
|
| 688 |
+
|
| 689 |
+
Examples
|
| 690 |
+
--------
|
| 691 |
+
Resampling a Series:
|
| 692 |
+
|
| 693 |
+
>>> s = pd.Series([1, 2, 3],
|
| 694 |
+
... index=pd.date_range('20180101', periods=3, freq='h'))
|
| 695 |
+
>>> s
|
| 696 |
+
2018-01-01 00:00:00 1
|
| 697 |
+
2018-01-01 01:00:00 2
|
| 698 |
+
2018-01-01 02:00:00 3
|
| 699 |
+
Freq: h, dtype: int64
|
| 700 |
+
|
| 701 |
+
>>> s.resample('30min').bfill()
|
| 702 |
+
2018-01-01 00:00:00 1
|
| 703 |
+
2018-01-01 00:30:00 2
|
| 704 |
+
2018-01-01 01:00:00 2
|
| 705 |
+
2018-01-01 01:30:00 3
|
| 706 |
+
2018-01-01 02:00:00 3
|
| 707 |
+
Freq: 30min, dtype: int64
|
| 708 |
+
|
| 709 |
+
>>> s.resample('15min').bfill(limit=2)
|
| 710 |
+
2018-01-01 00:00:00 1.0
|
| 711 |
+
2018-01-01 00:15:00 NaN
|
| 712 |
+
2018-01-01 00:30:00 2.0
|
| 713 |
+
2018-01-01 00:45:00 2.0
|
| 714 |
+
2018-01-01 01:00:00 2.0
|
| 715 |
+
2018-01-01 01:15:00 NaN
|
| 716 |
+
2018-01-01 01:30:00 3.0
|
| 717 |
+
2018-01-01 01:45:00 3.0
|
| 718 |
+
2018-01-01 02:00:00 3.0
|
| 719 |
+
Freq: 15min, dtype: float64
|
| 720 |
+
|
| 721 |
+
Resampling a DataFrame that has missing values:
|
| 722 |
+
|
| 723 |
+
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
|
| 724 |
+
... index=pd.date_range('20180101', periods=3,
|
| 725 |
+
... freq='h'))
|
| 726 |
+
>>> df
|
| 727 |
+
a b
|
| 728 |
+
2018-01-01 00:00:00 2.0 1
|
| 729 |
+
2018-01-01 01:00:00 NaN 3
|
| 730 |
+
2018-01-01 02:00:00 6.0 5
|
| 731 |
+
|
| 732 |
+
>>> df.resample('30min').bfill()
|
| 733 |
+
a b
|
| 734 |
+
2018-01-01 00:00:00 2.0 1
|
| 735 |
+
2018-01-01 00:30:00 NaN 3
|
| 736 |
+
2018-01-01 01:00:00 NaN 3
|
| 737 |
+
2018-01-01 01:30:00 6.0 5
|
| 738 |
+
2018-01-01 02:00:00 6.0 5
|
| 739 |
+
|
| 740 |
+
>>> df.resample('15min').bfill(limit=2)
|
| 741 |
+
a b
|
| 742 |
+
2018-01-01 00:00:00 2.0 1.0
|
| 743 |
+
2018-01-01 00:15:00 NaN NaN
|
| 744 |
+
2018-01-01 00:30:00 NaN 3.0
|
| 745 |
+
2018-01-01 00:45:00 NaN 3.0
|
| 746 |
+
2018-01-01 01:00:00 NaN 3.0
|
| 747 |
+
2018-01-01 01:15:00 NaN NaN
|
| 748 |
+
2018-01-01 01:30:00 6.0 5.0
|
| 749 |
+
2018-01-01 01:45:00 6.0 5.0
|
| 750 |
+
2018-01-01 02:00:00 6.0 5.0
|
| 751 |
+
"""
|
| 752 |
+
return self._upsample("bfill", limit=limit)
|
| 753 |
+
|
| 754 |
+
@final
|
| 755 |
+
def fillna(self, method, limit: int | None = None):
|
| 756 |
+
"""
|
| 757 |
+
Fill missing values introduced by upsampling.
|
| 758 |
+
|
| 759 |
+
In statistics, imputation is the process of replacing missing data with
|
| 760 |
+
substituted values [1]_. When resampling data, missing values may
|
| 761 |
+
appear (e.g., when the resampling frequency is higher than the original
|
| 762 |
+
frequency).
|
| 763 |
+
|
| 764 |
+
Missing values that existed in the original data will
|
| 765 |
+
not be modified.
|
| 766 |
+
|
| 767 |
+
Parameters
|
| 768 |
+
----------
|
| 769 |
+
method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
|
| 770 |
+
Method to use for filling holes in resampled data
|
| 771 |
+
|
| 772 |
+
* 'pad' or 'ffill': use previous valid observation to fill gap
|
| 773 |
+
(forward fill).
|
| 774 |
+
* 'backfill' or 'bfill': use next valid observation to fill gap.
|
| 775 |
+
* 'nearest': use nearest valid observation to fill gap.
|
| 776 |
+
|
| 777 |
+
limit : int, optional
|
| 778 |
+
Limit of how many consecutive missing values to fill.
|
| 779 |
+
|
| 780 |
+
Returns
|
| 781 |
+
-------
|
| 782 |
+
Series or DataFrame
|
| 783 |
+
An upsampled Series or DataFrame with missing values filled.
|
| 784 |
+
|
| 785 |
+
See Also
|
| 786 |
+
--------
|
| 787 |
+
bfill : Backward fill NaN values in the resampled data.
|
| 788 |
+
ffill : Forward fill NaN values in the resampled data.
|
| 789 |
+
nearest : Fill NaN values in the resampled data
|
| 790 |
+
with nearest neighbor starting from center.
|
| 791 |
+
interpolate : Fill NaN values using interpolation.
|
| 792 |
+
Series.fillna : Fill NaN values in the Series using the
|
| 793 |
+
specified method, which can be 'bfill' and 'ffill'.
|
| 794 |
+
DataFrame.fillna : Fill NaN values in the DataFrame using the
|
| 795 |
+
specified method, which can be 'bfill' and 'ffill'.
|
| 796 |
+
|
| 797 |
+
References
|
| 798 |
+
----------
|
| 799 |
+
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
|
| 800 |
+
|
| 801 |
+
Examples
|
| 802 |
+
--------
|
| 803 |
+
Resampling a Series:
|
| 804 |
+
|
| 805 |
+
>>> s = pd.Series([1, 2, 3],
|
| 806 |
+
... index=pd.date_range('20180101', periods=3, freq='h'))
|
| 807 |
+
>>> s
|
| 808 |
+
2018-01-01 00:00:00 1
|
| 809 |
+
2018-01-01 01:00:00 2
|
| 810 |
+
2018-01-01 02:00:00 3
|
| 811 |
+
Freq: h, dtype: int64
|
| 812 |
+
|
| 813 |
+
Without filling the missing values you get:
|
| 814 |
+
|
| 815 |
+
>>> s.resample("30min").asfreq()
|
| 816 |
+
2018-01-01 00:00:00 1.0
|
| 817 |
+
2018-01-01 00:30:00 NaN
|
| 818 |
+
2018-01-01 01:00:00 2.0
|
| 819 |
+
2018-01-01 01:30:00 NaN
|
| 820 |
+
2018-01-01 02:00:00 3.0
|
| 821 |
+
Freq: 30min, dtype: float64
|
| 822 |
+
|
| 823 |
+
>>> s.resample('30min').fillna("backfill")
|
| 824 |
+
2018-01-01 00:00:00 1
|
| 825 |
+
2018-01-01 00:30:00 2
|
| 826 |
+
2018-01-01 01:00:00 2
|
| 827 |
+
2018-01-01 01:30:00 3
|
| 828 |
+
2018-01-01 02:00:00 3
|
| 829 |
+
Freq: 30min, dtype: int64
|
| 830 |
+
|
| 831 |
+
>>> s.resample('15min').fillna("backfill", limit=2)
|
| 832 |
+
2018-01-01 00:00:00 1.0
|
| 833 |
+
2018-01-01 00:15:00 NaN
|
| 834 |
+
2018-01-01 00:30:00 2.0
|
| 835 |
+
2018-01-01 00:45:00 2.0
|
| 836 |
+
2018-01-01 01:00:00 2.0
|
| 837 |
+
2018-01-01 01:15:00 NaN
|
| 838 |
+
2018-01-01 01:30:00 3.0
|
| 839 |
+
2018-01-01 01:45:00 3.0
|
| 840 |
+
2018-01-01 02:00:00 3.0
|
| 841 |
+
Freq: 15min, dtype: float64
|
| 842 |
+
|
| 843 |
+
>>> s.resample('30min').fillna("pad")
|
| 844 |
+
2018-01-01 00:00:00 1
|
| 845 |
+
2018-01-01 00:30:00 1
|
| 846 |
+
2018-01-01 01:00:00 2
|
| 847 |
+
2018-01-01 01:30:00 2
|
| 848 |
+
2018-01-01 02:00:00 3
|
| 849 |
+
Freq: 30min, dtype: int64
|
| 850 |
+
|
| 851 |
+
>>> s.resample('30min').fillna("nearest")
|
| 852 |
+
2018-01-01 00:00:00 1
|
| 853 |
+
2018-01-01 00:30:00 2
|
| 854 |
+
2018-01-01 01:00:00 2
|
| 855 |
+
2018-01-01 01:30:00 3
|
| 856 |
+
2018-01-01 02:00:00 3
|
| 857 |
+
Freq: 30min, dtype: int64
|
| 858 |
+
|
| 859 |
+
Missing values present before the upsampling are not affected.
|
| 860 |
+
|
| 861 |
+
>>> sm = pd.Series([1, None, 3],
|
| 862 |
+
... index=pd.date_range('20180101', periods=3, freq='h'))
|
| 863 |
+
>>> sm
|
| 864 |
+
2018-01-01 00:00:00 1.0
|
| 865 |
+
2018-01-01 01:00:00 NaN
|
| 866 |
+
2018-01-01 02:00:00 3.0
|
| 867 |
+
Freq: h, dtype: float64
|
| 868 |
+
|
| 869 |
+
>>> sm.resample('30min').fillna('backfill')
|
| 870 |
+
2018-01-01 00:00:00 1.0
|
| 871 |
+
2018-01-01 00:30:00 NaN
|
| 872 |
+
2018-01-01 01:00:00 NaN
|
| 873 |
+
2018-01-01 01:30:00 3.0
|
| 874 |
+
2018-01-01 02:00:00 3.0
|
| 875 |
+
Freq: 30min, dtype: float64
|
| 876 |
+
|
| 877 |
+
>>> sm.resample('30min').fillna('pad')
|
| 878 |
+
2018-01-01 00:00:00 1.0
|
| 879 |
+
2018-01-01 00:30:00 1.0
|
| 880 |
+
2018-01-01 01:00:00 NaN
|
| 881 |
+
2018-01-01 01:30:00 NaN
|
| 882 |
+
2018-01-01 02:00:00 3.0
|
| 883 |
+
Freq: 30min, dtype: float64
|
| 884 |
+
|
| 885 |
+
>>> sm.resample('30min').fillna('nearest')
|
| 886 |
+
2018-01-01 00:00:00 1.0
|
| 887 |
+
2018-01-01 00:30:00 NaN
|
| 888 |
+
2018-01-01 01:00:00 NaN
|
| 889 |
+
2018-01-01 01:30:00 3.0
|
| 890 |
+
2018-01-01 02:00:00 3.0
|
| 891 |
+
Freq: 30min, dtype: float64
|
| 892 |
+
|
| 893 |
+
DataFrame resampling is done column-wise. All the same options are
|
| 894 |
+
available.
|
| 895 |
+
|
| 896 |
+
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
|
| 897 |
+
... index=pd.date_range('20180101', periods=3,
|
| 898 |
+
... freq='h'))
|
| 899 |
+
>>> df
|
| 900 |
+
a b
|
| 901 |
+
2018-01-01 00:00:00 2.0 1
|
| 902 |
+
2018-01-01 01:00:00 NaN 3
|
| 903 |
+
2018-01-01 02:00:00 6.0 5
|
| 904 |
+
|
| 905 |
+
>>> df.resample('30min').fillna("bfill")
|
| 906 |
+
a b
|
| 907 |
+
2018-01-01 00:00:00 2.0 1
|
| 908 |
+
2018-01-01 00:30:00 NaN 3
|
| 909 |
+
2018-01-01 01:00:00 NaN 3
|
| 910 |
+
2018-01-01 01:30:00 6.0 5
|
| 911 |
+
2018-01-01 02:00:00 6.0 5
|
| 912 |
+
"""
|
| 913 |
+
warnings.warn(
|
| 914 |
+
f"{type(self).__name__}.fillna is deprecated and will be removed "
|
| 915 |
+
"in a future version. Use obj.ffill(), obj.bfill(), "
|
| 916 |
+
"or obj.nearest() instead.",
|
| 917 |
+
FutureWarning,
|
| 918 |
+
stacklevel=find_stack_level(),
|
| 919 |
+
)
|
| 920 |
+
return self._upsample(method, limit=limit)
|
| 921 |
+
|
| 922 |
+
@final
|
| 923 |
+
def interpolate(
|
| 924 |
+
self,
|
| 925 |
+
method: InterpolateOptions = "linear",
|
| 926 |
+
*,
|
| 927 |
+
axis: Axis = 0,
|
| 928 |
+
limit: int | None = None,
|
| 929 |
+
inplace: bool = False,
|
| 930 |
+
limit_direction: Literal["forward", "backward", "both"] = "forward",
|
| 931 |
+
limit_area=None,
|
| 932 |
+
downcast=lib.no_default,
|
| 933 |
+
**kwargs,
|
| 934 |
+
):
|
| 935 |
+
"""
|
| 936 |
+
Interpolate values between target timestamps according to different methods.
|
| 937 |
+
|
| 938 |
+
The original index is first reindexed to target timestamps
|
| 939 |
+
(see :meth:`core.resample.Resampler.asfreq`),
|
| 940 |
+
then the interpolation of ``NaN`` values via :meth:`DataFrame.interpolate`
|
| 941 |
+
happens.
|
| 942 |
+
|
| 943 |
+
Parameters
|
| 944 |
+
----------
|
| 945 |
+
method : str, default 'linear'
|
| 946 |
+
Interpolation technique to use. One of:
|
| 947 |
+
|
| 948 |
+
* 'linear': Ignore the index and treat the values as equally
|
| 949 |
+
spaced. This is the only method supported on MultiIndexes.
|
| 950 |
+
* 'time': Works on daily and higher resolution data to interpolate
|
| 951 |
+
given length of interval.
|
| 952 |
+
* 'index', 'values': use the actual numerical values of the index.
|
| 953 |
+
* 'pad': Fill in NaNs using existing values.
|
| 954 |
+
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
|
| 955 |
+
'barycentric', 'polynomial': Passed to
|
| 956 |
+
`scipy.interpolate.interp1d`, whereas 'spline' is passed to
|
| 957 |
+
`scipy.interpolate.UnivariateSpline`. These methods use the numerical
|
| 958 |
+
values of the index. Both 'polynomial' and 'spline' require that
|
| 959 |
+
you also specify an `order` (int), e.g.
|
| 960 |
+
``df.interpolate(method='polynomial', order=5)``. Note that,
|
| 961 |
+
`slinear` method in Pandas refers to the Scipy first order `spline`
|
| 962 |
+
instead of Pandas first order `spline`.
|
| 963 |
+
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
|
| 964 |
+
'cubicspline': Wrappers around the SciPy interpolation methods of
|
| 965 |
+
similar names. See `Notes`.
|
| 966 |
+
* 'from_derivatives': Refers to
|
| 967 |
+
`scipy.interpolate.BPoly.from_derivatives`.
|
| 968 |
+
|
| 969 |
+
axis : {{0 or 'index', 1 or 'columns', None}}, default None
|
| 970 |
+
Axis to interpolate along. For `Series` this parameter is unused
|
| 971 |
+
and defaults to 0.
|
| 972 |
+
limit : int, optional
|
| 973 |
+
Maximum number of consecutive NaNs to fill. Must be greater than
|
| 974 |
+
0.
|
| 975 |
+
inplace : bool, default False
|
| 976 |
+
Update the data in place if possible.
|
| 977 |
+
limit_direction : {{'forward', 'backward', 'both'}}, Optional
|
| 978 |
+
Consecutive NaNs will be filled in this direction.
|
| 979 |
+
|
| 980 |
+
If limit is specified:
|
| 981 |
+
* If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
|
| 982 |
+
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
|
| 983 |
+
'backwards'.
|
| 984 |
+
|
| 985 |
+
If 'limit' is not specified:
|
| 986 |
+
* If 'method' is 'backfill' or 'bfill', the default is 'backward'
|
| 987 |
+
* else the default is 'forward'
|
| 988 |
+
|
| 989 |
+
raises ValueError if `limit_direction` is 'forward' or 'both' and
|
| 990 |
+
method is 'backfill' or 'bfill'.
|
| 991 |
+
raises ValueError if `limit_direction` is 'backward' or 'both' and
|
| 992 |
+
method is 'pad' or 'ffill'.
|
| 993 |
+
|
| 994 |
+
limit_area : {{`None`, 'inside', 'outside'}}, default None
|
| 995 |
+
If limit is specified, consecutive NaNs will be filled with this
|
| 996 |
+
restriction.
|
| 997 |
+
|
| 998 |
+
* ``None``: No fill restriction.
|
| 999 |
+
* 'inside': Only fill NaNs surrounded by valid values
|
| 1000 |
+
(interpolate).
|
| 1001 |
+
* 'outside': Only fill NaNs outside valid values (extrapolate).
|
| 1002 |
+
|
| 1003 |
+
downcast : optional, 'infer' or None, defaults to None
|
| 1004 |
+
Downcast dtypes if possible.
|
| 1005 |
+
|
| 1006 |
+
.. deprecated:: 2.1.0
|
| 1007 |
+
|
| 1008 |
+
``**kwargs`` : optional
|
| 1009 |
+
Keyword arguments to pass on to the interpolating function.
|
| 1010 |
+
|
| 1011 |
+
Returns
|
| 1012 |
+
-------
|
| 1013 |
+
DataFrame or Series
|
| 1014 |
+
Interpolated values at the specified freq.
|
| 1015 |
+
|
| 1016 |
+
See Also
|
| 1017 |
+
--------
|
| 1018 |
+
core.resample.Resampler.asfreq: Return the values at the new freq,
|
| 1019 |
+
essentially a reindex.
|
| 1020 |
+
DataFrame.interpolate: Fill NaN values using an interpolation method.
|
| 1021 |
+
|
| 1022 |
+
Notes
|
| 1023 |
+
-----
|
| 1024 |
+
For high-frequent or non-equidistant time-series with timestamps
|
| 1025 |
+
the reindexing followed by interpolation may lead to information loss
|
| 1026 |
+
as shown in the last example.
|
| 1027 |
+
|
| 1028 |
+
Examples
|
| 1029 |
+
--------
|
| 1030 |
+
|
| 1031 |
+
>>> start = "2023-03-01T07:00:00"
|
| 1032 |
+
>>> timesteps = pd.date_range(start, periods=5, freq="s")
|
| 1033 |
+
>>> series = pd.Series(data=[1, -1, 2, 1, 3], index=timesteps)
|
| 1034 |
+
>>> series
|
| 1035 |
+
2023-03-01 07:00:00 1
|
| 1036 |
+
2023-03-01 07:00:01 -1
|
| 1037 |
+
2023-03-01 07:00:02 2
|
| 1038 |
+
2023-03-01 07:00:03 1
|
| 1039 |
+
2023-03-01 07:00:04 3
|
| 1040 |
+
Freq: s, dtype: int64
|
| 1041 |
+
|
| 1042 |
+
Upsample the dataframe to 0.5Hz by providing the period time of 2s.
|
| 1043 |
+
|
| 1044 |
+
>>> series.resample("2s").interpolate("linear")
|
| 1045 |
+
2023-03-01 07:00:00 1
|
| 1046 |
+
2023-03-01 07:00:02 2
|
| 1047 |
+
2023-03-01 07:00:04 3
|
| 1048 |
+
Freq: 2s, dtype: int64
|
| 1049 |
+
|
| 1050 |
+
Downsample the dataframe to 2Hz by providing the period time of 500ms.
|
| 1051 |
+
|
| 1052 |
+
>>> series.resample("500ms").interpolate("linear")
|
| 1053 |
+
2023-03-01 07:00:00.000 1.0
|
| 1054 |
+
2023-03-01 07:00:00.500 0.0
|
| 1055 |
+
2023-03-01 07:00:01.000 -1.0
|
| 1056 |
+
2023-03-01 07:00:01.500 0.5
|
| 1057 |
+
2023-03-01 07:00:02.000 2.0
|
| 1058 |
+
2023-03-01 07:00:02.500 1.5
|
| 1059 |
+
2023-03-01 07:00:03.000 1.0
|
| 1060 |
+
2023-03-01 07:00:03.500 2.0
|
| 1061 |
+
2023-03-01 07:00:04.000 3.0
|
| 1062 |
+
Freq: 500ms, dtype: float64
|
| 1063 |
+
|
| 1064 |
+
Internal reindexing with ``asfreq()`` prior to interpolation leads to
|
| 1065 |
+
an interpolated timeseries on the basis the reindexed timestamps (anchors).
|
| 1066 |
+
Since not all datapoints from original series become anchors,
|
| 1067 |
+
it can lead to misleading interpolation results as in the following example:
|
| 1068 |
+
|
| 1069 |
+
>>> series.resample("400ms").interpolate("linear")
|
| 1070 |
+
2023-03-01 07:00:00.000 1.0
|
| 1071 |
+
2023-03-01 07:00:00.400 1.2
|
| 1072 |
+
2023-03-01 07:00:00.800 1.4
|
| 1073 |
+
2023-03-01 07:00:01.200 1.6
|
| 1074 |
+
2023-03-01 07:00:01.600 1.8
|
| 1075 |
+
2023-03-01 07:00:02.000 2.0
|
| 1076 |
+
2023-03-01 07:00:02.400 2.2
|
| 1077 |
+
2023-03-01 07:00:02.800 2.4
|
| 1078 |
+
2023-03-01 07:00:03.200 2.6
|
| 1079 |
+
2023-03-01 07:00:03.600 2.8
|
| 1080 |
+
2023-03-01 07:00:04.000 3.0
|
| 1081 |
+
Freq: 400ms, dtype: float64
|
| 1082 |
+
|
| 1083 |
+
Note that the series erroneously increases between two anchors
|
| 1084 |
+
``07:00:00`` and ``07:00:02``.
|
| 1085 |
+
"""
|
| 1086 |
+
assert downcast is lib.no_default # just checking coverage
|
| 1087 |
+
result = self._upsample("asfreq")
|
| 1088 |
+
return result.interpolate(
|
| 1089 |
+
method=method,
|
| 1090 |
+
axis=axis,
|
| 1091 |
+
limit=limit,
|
| 1092 |
+
inplace=inplace,
|
| 1093 |
+
limit_direction=limit_direction,
|
| 1094 |
+
limit_area=limit_area,
|
| 1095 |
+
downcast=downcast,
|
| 1096 |
+
**kwargs,
|
| 1097 |
+
)
|
| 1098 |
+
|
| 1099 |
+
@final
|
| 1100 |
+
def asfreq(self, fill_value=None):
|
| 1101 |
+
"""
|
| 1102 |
+
Return the values at the new freq, essentially a reindex.
|
| 1103 |
+
|
| 1104 |
+
Parameters
|
| 1105 |
+
----------
|
| 1106 |
+
fill_value : scalar, optional
|
| 1107 |
+
Value to use for missing values, applied during upsampling (note
|
| 1108 |
+
this does not fill NaNs that already were present).
|
| 1109 |
+
|
| 1110 |
+
Returns
|
| 1111 |
+
-------
|
| 1112 |
+
DataFrame or Series
|
| 1113 |
+
Values at the specified freq.
|
| 1114 |
+
|
| 1115 |
+
See Also
|
| 1116 |
+
--------
|
| 1117 |
+
Series.asfreq: Convert TimeSeries to specified frequency.
|
| 1118 |
+
DataFrame.asfreq: Convert TimeSeries to specified frequency.
|
| 1119 |
+
|
| 1120 |
+
Examples
|
| 1121 |
+
--------
|
| 1122 |
+
|
| 1123 |
+
>>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
|
| 1124 |
+
... ['2023-01-01', '2023-01-31', '2023-02-01', '2023-02-28']))
|
| 1125 |
+
>>> ser
|
| 1126 |
+
2023-01-01 1
|
| 1127 |
+
2023-01-31 2
|
| 1128 |
+
2023-02-01 3
|
| 1129 |
+
2023-02-28 4
|
| 1130 |
+
dtype: int64
|
| 1131 |
+
>>> ser.resample('MS').asfreq()
|
| 1132 |
+
2023-01-01 1
|
| 1133 |
+
2023-02-01 3
|
| 1134 |
+
Freq: MS, dtype: int64
|
| 1135 |
+
"""
|
| 1136 |
+
return self._upsample("asfreq", fill_value=fill_value)
|
| 1137 |
+
|
| 1138 |
+
@final
|
| 1139 |
+
def sum(
|
| 1140 |
+
self,
|
| 1141 |
+
numeric_only: bool = False,
|
| 1142 |
+
min_count: int = 0,
|
| 1143 |
+
*args,
|
| 1144 |
+
**kwargs,
|
| 1145 |
+
):
|
| 1146 |
+
"""
|
| 1147 |
+
Compute sum of group values.
|
| 1148 |
+
|
| 1149 |
+
Parameters
|
| 1150 |
+
----------
|
| 1151 |
+
numeric_only : bool, default False
|
| 1152 |
+
Include only float, int, boolean columns.
|
| 1153 |
+
|
| 1154 |
+
.. versionchanged:: 2.0.0
|
| 1155 |
+
|
| 1156 |
+
numeric_only no longer accepts ``None``.
|
| 1157 |
+
|
| 1158 |
+
min_count : int, default 0
|
| 1159 |
+
The required number of valid values to perform the operation. If fewer
|
| 1160 |
+
than ``min_count`` non-NA values are present the result will be NA.
|
| 1161 |
+
|
| 1162 |
+
Returns
|
| 1163 |
+
-------
|
| 1164 |
+
Series or DataFrame
|
| 1165 |
+
Computed sum of values within each group.
|
| 1166 |
+
|
| 1167 |
+
Examples
|
| 1168 |
+
--------
|
| 1169 |
+
>>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
|
| 1170 |
+
... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
|
| 1171 |
+
>>> ser
|
| 1172 |
+
2023-01-01 1
|
| 1173 |
+
2023-01-15 2
|
| 1174 |
+
2023-02-01 3
|
| 1175 |
+
2023-02-15 4
|
| 1176 |
+
dtype: int64
|
| 1177 |
+
>>> ser.resample('MS').sum()
|
| 1178 |
+
2023-01-01 3
|
| 1179 |
+
2023-02-01 7
|
| 1180 |
+
Freq: MS, dtype: int64
|
| 1181 |
+
"""
|
| 1182 |
+
maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs)
|
| 1183 |
+
nv.validate_resampler_func("sum", args, kwargs)
|
| 1184 |
+
return self._downsample("sum", numeric_only=numeric_only, min_count=min_count)
|
| 1185 |
+
|
| 1186 |
+
@final
|
| 1187 |
+
def prod(
|
| 1188 |
+
self,
|
| 1189 |
+
numeric_only: bool = False,
|
| 1190 |
+
min_count: int = 0,
|
| 1191 |
+
*args,
|
| 1192 |
+
**kwargs,
|
| 1193 |
+
):
|
| 1194 |
+
"""
|
| 1195 |
+
Compute prod of group values.
|
| 1196 |
+
|
| 1197 |
+
Parameters
|
| 1198 |
+
----------
|
| 1199 |
+
numeric_only : bool, default False
|
| 1200 |
+
Include only float, int, boolean columns.
|
| 1201 |
+
|
| 1202 |
+
.. versionchanged:: 2.0.0
|
| 1203 |
+
|
| 1204 |
+
numeric_only no longer accepts ``None``.
|
| 1205 |
+
|
| 1206 |
+
min_count : int, default 0
|
| 1207 |
+
The required number of valid values to perform the operation. If fewer
|
| 1208 |
+
than ``min_count`` non-NA values are present the result will be NA.
|
| 1209 |
+
|
| 1210 |
+
Returns
|
| 1211 |
+
-------
|
| 1212 |
+
Series or DataFrame
|
| 1213 |
+
Computed prod of values within each group.
|
| 1214 |
+
|
| 1215 |
+
Examples
|
| 1216 |
+
--------
|
| 1217 |
+
>>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
|
| 1218 |
+
... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
|
| 1219 |
+
>>> ser
|
| 1220 |
+
2023-01-01 1
|
| 1221 |
+
2023-01-15 2
|
| 1222 |
+
2023-02-01 3
|
| 1223 |
+
2023-02-15 4
|
| 1224 |
+
dtype: int64
|
| 1225 |
+
>>> ser.resample('MS').prod()
|
| 1226 |
+
2023-01-01 2
|
| 1227 |
+
2023-02-01 12
|
| 1228 |
+
Freq: MS, dtype: int64
|
| 1229 |
+
"""
|
| 1230 |
+
maybe_warn_args_and_kwargs(type(self), "prod", args, kwargs)
|
| 1231 |
+
nv.validate_resampler_func("prod", args, kwargs)
|
| 1232 |
+
return self._downsample("prod", numeric_only=numeric_only, min_count=min_count)
|
| 1233 |
+
|
| 1234 |
+
@final
|
| 1235 |
+
def min(
|
| 1236 |
+
self,
|
| 1237 |
+
numeric_only: bool = False,
|
| 1238 |
+
min_count: int = 0,
|
| 1239 |
+
*args,
|
| 1240 |
+
**kwargs,
|
| 1241 |
+
):
|
| 1242 |
+
"""
|
| 1243 |
+
Compute min value of group.
|
| 1244 |
+
|
| 1245 |
+
Returns
|
| 1246 |
+
-------
|
| 1247 |
+
Series or DataFrame
|
| 1248 |
+
|
| 1249 |
+
Examples
|
| 1250 |
+
--------
|
| 1251 |
+
>>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
|
| 1252 |
+
... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
|
| 1253 |
+
>>> ser
|
| 1254 |
+
2023-01-01 1
|
| 1255 |
+
2023-01-15 2
|
| 1256 |
+
2023-02-01 3
|
| 1257 |
+
2023-02-15 4
|
| 1258 |
+
dtype: int64
|
| 1259 |
+
>>> ser.resample('MS').min()
|
| 1260 |
+
2023-01-01 1
|
| 1261 |
+
2023-02-01 3
|
| 1262 |
+
Freq: MS, dtype: int64
|
| 1263 |
+
"""
|
| 1264 |
+
|
| 1265 |
+
maybe_warn_args_and_kwargs(type(self), "min", args, kwargs)
|
| 1266 |
+
nv.validate_resampler_func("min", args, kwargs)
|
| 1267 |
+
return self._downsample("min", numeric_only=numeric_only, min_count=min_count)
|
| 1268 |
+
|
| 1269 |
+
@final
|
| 1270 |
+
def max(
|
| 1271 |
+
self,
|
| 1272 |
+
numeric_only: bool = False,
|
| 1273 |
+
min_count: int = 0,
|
| 1274 |
+
*args,
|
| 1275 |
+
**kwargs,
|
| 1276 |
+
):
|
| 1277 |
+
"""
|
| 1278 |
+
Compute max value of group.
|
| 1279 |
+
|
| 1280 |
+
Returns
|
| 1281 |
+
-------
|
| 1282 |
+
Series or DataFrame
|
| 1283 |
+
|
| 1284 |
+
Examples
|
| 1285 |
+
--------
|
| 1286 |
+
>>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
|
| 1287 |
+
... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
|
| 1288 |
+
>>> ser
|
| 1289 |
+
2023-01-01 1
|
| 1290 |
+
2023-01-15 2
|
| 1291 |
+
2023-02-01 3
|
| 1292 |
+
2023-02-15 4
|
| 1293 |
+
dtype: int64
|
| 1294 |
+
>>> ser.resample('MS').max()
|
| 1295 |
+
2023-01-01 2
|
| 1296 |
+
2023-02-01 4
|
| 1297 |
+
Freq: MS, dtype: int64
|
| 1298 |
+
"""
|
| 1299 |
+
maybe_warn_args_and_kwargs(type(self), "max", args, kwargs)
|
| 1300 |
+
nv.validate_resampler_func("max", args, kwargs)
|
| 1301 |
+
return self._downsample("max", numeric_only=numeric_only, min_count=min_count)
|
| 1302 |
+
|
| 1303 |
+
@final
@doc(GroupBy.first)
def first(
    self,
    numeric_only: bool = False,
    min_count: int = 0,
    skipna: bool = True,
    *args,
    **kwargs,
):
    # No docstring here: @doc(GroupBy.first) supplies it.
    op = "first"
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(
        op,
        numeric_only=numeric_only,
        min_count=min_count,
        skipna=skipna,
    )
|
| 1318 |
+
|
| 1319 |
+
@final
@doc(GroupBy.last)
def last(
    self,
    numeric_only: bool = False,
    min_count: int = 0,
    skipna: bool = True,
    *args,
    **kwargs,
):
    # No docstring here: @doc(GroupBy.last) supplies it.
    op = "last"
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(
        op,
        numeric_only=numeric_only,
        min_count=min_count,
        skipna=skipna,
    )
|
| 1334 |
+
|
| 1335 |
+
@final
@doc(GroupBy.median)
def median(self, numeric_only: bool = False, *args, **kwargs):
    # No docstring here: @doc(GroupBy.median) supplies it.
    op = "median"
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(op, numeric_only=numeric_only)
|
| 1341 |
+
|
| 1342 |
+
@final
def mean(
    self,
    numeric_only: bool = False,
    *args,
    **kwargs,
):
    """
    Compute mean of groups, excluding missing values.

    Parameters
    ----------
    numeric_only : bool, default False
        Include only `float`, `int` or `boolean` data.

        .. versionchanged:: 2.0.0

            numeric_only now defaults to ``False``.

    Returns
    -------
    DataFrame or Series
        Mean of values within each group.

    Examples
    --------

    >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
    ...     ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
    >>> ser
    2023-01-01    1
    2023-01-15    2
    2023-02-01    3
    2023-02-15    4
    dtype: int64
    >>> ser.resample('MS').mean()
    2023-01-01    1.5
    2023-02-01    3.5
    Freq: MS, dtype: float64
    """
    op = "mean"
    # Validate leftover args/kwargs first, then defer to the grouped mean.
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(op, numeric_only=numeric_only)
|
| 1385 |
+
|
| 1386 |
+
@final
def std(
    self,
    ddof: int = 1,
    numeric_only: bool = False,
    *args,
    **kwargs,
):
    """
    Compute standard deviation of groups, excluding missing values.

    Parameters
    ----------
    ddof : int, default 1
        Degrees of freedom.
    numeric_only : bool, default False
        Include only `float`, `int` or `boolean` data.

        .. versionadded:: 1.5.0

        .. versionchanged:: 2.0.0

            numeric_only now defaults to ``False``.

    Returns
    -------
    DataFrame or Series
        Standard deviation of values within each group.

    Examples
    --------

    >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
    ...                 index=pd.DatetimeIndex(['2023-01-01',
    ...                                         '2023-01-10',
    ...                                         '2023-01-15',
    ...                                         '2023-02-01',
    ...                                         '2023-02-10',
    ...                                         '2023-02-15']))
    >>> ser.resample('MS').std()
    2023-01-01    1.000000
    2023-02-01    2.645751
    Freq: MS, dtype: float64
    """
    op = "std"
    # Validate leftover args/kwargs, then dispatch to the grouped std.
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(op, ddof=ddof, numeric_only=numeric_only)
|
| 1433 |
+
|
| 1434 |
+
@final
def var(
    self,
    ddof: int = 1,
    numeric_only: bool = False,
    *args,
    **kwargs,
):
    """
    Compute variance of groups, excluding missing values.

    Parameters
    ----------
    ddof : int, default 1
        Degrees of freedom.

    numeric_only : bool, default False
        Include only `float`, `int` or `boolean` data.

        .. versionadded:: 1.5.0

        .. versionchanged:: 2.0.0

            numeric_only now defaults to ``False``.

    Returns
    -------
    DataFrame or Series
        Variance of values within each group.

    Examples
    --------

    >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
    ...                 index=pd.DatetimeIndex(['2023-01-01',
    ...                                         '2023-01-10',
    ...                                         '2023-01-15',
    ...                                         '2023-02-01',
    ...                                         '2023-02-10',
    ...                                         '2023-02-15']))
    >>> ser.resample('MS').var()
    2023-01-01    1.0
    2023-02-01    7.0
    Freq: MS, dtype: float64

    >>> ser.resample('MS').var(ddof=0)
    2023-01-01    0.666667
    2023-02-01    4.666667
    Freq: MS, dtype: float64
    """
    op = "var"
    # Validate leftover args/kwargs, then dispatch to the grouped var.
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(op, ddof=ddof, numeric_only=numeric_only)
|
| 1487 |
+
|
| 1488 |
+
@final
@doc(GroupBy.sem)
def sem(
    self,
    ddof: int = 1,
    numeric_only: bool = False,
    *args,
    **kwargs,
):
    # No docstring here: @doc(GroupBy.sem) supplies it.
    op = "sem"
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(op, ddof=ddof, numeric_only=numeric_only)
|
| 1500 |
+
|
| 1501 |
+
@final
@doc(GroupBy.ohlc)
def ohlc(
    self,
    *args,
    **kwargs,
):
    # Warn about deprecated extra args and run numpy-compat validation
    # before dispatching to the grouped OHLC aggregation.
    maybe_warn_args_and_kwargs(type(self), "ohlc", args, kwargs)
    nv.validate_resampler_func("ohlc", args, kwargs)

    ax = self.ax
    obj = self._obj_with_exclusions
    if len(ax) == 0:
        # GH#42902: an empty axis cannot go through the grouped
        # aggregation, so build the empty open/high/low/close result
        # shape by hand on the frequency-adjusted index.
        obj = obj.copy()
        obj.index = _asfreq_compat(obj.index, self.freq)
        if obj.ndim == 1:
            obj = obj.to_frame()
            obj = obj.reindex(["open", "high", "low", "close"], axis=1)
        else:
            # One (column, ohlc-field) pair per original column.
            mi = MultiIndex.from_product(
                [obj.columns, ["open", "high", "low", "close"]]
            )
            obj = obj.reindex(mi, axis=1)
        return obj

    return self._downsample("ohlc")
|
| 1528 |
+
|
| 1529 |
+
@final
@doc(SeriesGroupBy.nunique)
def nunique(
    self,
    *args,
    **kwargs,
):
    # No docstring here: @doc(SeriesGroupBy.nunique) supplies it.
    op = "nunique"
    maybe_warn_args_and_kwargs(type(self), op, args, kwargs)
    nv.validate_resampler_func(op, args, kwargs)
    return self._downsample(op)
|
| 1539 |
+
|
| 1540 |
+
@final
@doc(GroupBy.size)
def size(self):
    result = self._downsample("size")

    # If the result is a non-empty DataFrame we stack to get a Series
    # GH 46826
    if isinstance(result, ABCDataFrame) and not result.empty:
        result = result.stack(future_stack=True)

    # Empty axis: rebuild an explicitly empty int64 Series on the
    # resampled index so the dtype matches the non-empty case.
    if not len(self.ax):
        from pandas import Series

        if self._selected_obj.ndim == 1:
            # Preserve the original Series' name on the empty result.
            name = self._selected_obj.name
        else:
            name = None
        result = Series([], index=result.index, dtype="int64", name=name)
    return result
|
| 1559 |
+
|
| 1560 |
+
@final
@doc(GroupBy.count)
def count(self):
    result = self._downsample("count")
    if not len(self.ax):
        # Empty axis: rebuild an explicitly empty int64 result on the
        # resampled index so the dtype matches the non-empty case.
        if self._selected_obj.ndim == 1:
            result = type(self._selected_obj)(
                [], index=result.index, dtype="int64", name=self._selected_obj.name
            )
        else:
            from pandas import DataFrame

            result = DataFrame(
                [], index=result.index, columns=result.columns, dtype="int64"
            )

    return result
|
| 1577 |
+
|
| 1578 |
+
@final
def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs):
    """
    Return value at the given quantile.

    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)

    Returns
    -------
    DataFrame or Series
        Quantile of values within each group.

    See Also
    --------
    Series.quantile
        Return a series, where the index is q and the values are the quantiles.
    DataFrame.quantile
        Return a DataFrame, where the columns are the columns of self,
        and the values are the quantiles.
    DataFrameGroupBy.quantile
        Return a DataFrame, where the columns are groupby columns,
        and the values are its quantiles.

    Examples
    --------

    >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
    ...                 index=pd.DatetimeIndex(['2023-01-01',
    ...                                         '2023-01-10',
    ...                                         '2023-01-15',
    ...                                         '2023-02-01',
    ...                                         '2023-02-10',
    ...                                         '2023-02-15']))
    >>> ser.resample('MS').quantile()
    2023-01-01    2.0
    2023-02-01    4.0
    Freq: MS, dtype: float64

    >>> ser.resample('MS').quantile(.25)
    2023-01-01    1.5
    2023-02-01    3.5
    Freq: MS, dtype: float64
    """
    # Pure pass-through: the grouped quantile does all of the work.
    result = self._downsample("quantile", q=q, **kwargs)
    return result
|
| 1624 |
+
|
| 1625 |
+
|
| 1626 |
+
class _GroupByMixin(PandasObject, SelectionMixin):
    """
    Provide the groupby facilities.

    Mixed into the per-index ``*ResamplerGroupby`` classes; replaces the
    up/down-sampling machinery with a per-group apply over ``self._groupby``.
    """

    _attributes: list[str]  # in practice the same as Resampler._attributes
    _selection: IndexLabel | None = None
    _groupby: GroupBy
    _timegrouper: TimeGrouper

    def __init__(
        self,
        *,
        parent: Resampler,
        groupby: GroupBy,
        key=None,
        selection: IndexLabel | None = None,
        include_groups: bool = False,
    ) -> None:
        # reached via ._gotitem and _get_resampler_for_grouping

        assert isinstance(groupby, GroupBy), type(groupby)

        # parent is always a Resampler, sometimes a _GroupByMixin
        assert isinstance(parent, Resampler), type(parent)

        # initialize our GroupByMixin object with
        # the resampler attributes
        for attr in self._attributes:
            setattr(self, attr, getattr(parent, attr))
        self._selection = selection

        self.binner = parent.binner
        self.key = key

        self._groupby = groupby
        # shallow copy so later mutation of the timegrouper here does not
        # leak back into the parent resampler
        self._timegrouper = copy.copy(parent._timegrouper)

        self.ax = parent.ax
        self.obj = parent.obj
        self.include_groups = include_groups

    @no_type_check
    def _apply(self, f, *args, **kwargs):
        """
        Dispatch to _upsample; we are stripping all of the _upsample kwargs and
        performing the original function call on the grouped object.
        """

        def func(x):
            # Re-resample each group with the non-grouped resampler class,
            # then apply ``f`` (an attribute name or a callable) to it.
            x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax)

            if isinstance(f, str):
                return getattr(x, f)(**kwargs)

            return x.apply(f, *args, **kwargs)

        result = _apply(self._groupby, func, include_groups=self.include_groups)
        return self._wrap_result(result)

    # All resampling entry points funnel through the per-group apply above.
    _upsample = _apply
    _downsample = _apply
    _groupby_and_aggregate = _apply

    @final
    def _gotitem(self, key, ndim, subset=None):
        """
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj
            if key is not None:
                subset = subset[key]
            else:
                # reached via Apply.agg_dict_like with selection=None, ndim=1
                assert subset.ndim == 1

        # Try to select from a DataFrame, falling back to a Series
        try:
            # keep the resample key column in the selection so the
            # re-resampling in _apply can still find it
            if isinstance(key, list) and self.key not in key and self.key is not None:
                key.append(self.key)
            groupby = self._groupby[key]
        except IndexError:
            groupby = self._groupby

        selection = self._infer_selection(key, subset)

        new_rs = type(self)(
            groupby=groupby,
            parent=cast(Resampler, self),
            selection=selection,
        )
        return new_rs
|
| 1728 |
+
|
| 1729 |
+
|
| 1730 |
+
class DatetimeIndexResampler(Resampler):
    """Resampler for objects indexed by a DatetimeIndex."""

    ax: DatetimeIndex

    @property
    def _resampler_for_grouping(self):
        return DatetimeIndexResamplerGroupby

    def _get_binner_for_time(self):
        # this is how we are actually creating the bins
        if self.kind == "period":
            return self._timegrouper._get_time_period_bins(self.ax)
        return self._timegrouper._get_time_bins(self.ax)

    def _downsample(self, how, **kwargs):
        """
        Downsample the cython defined function.

        Parameters
        ----------
        how : string / cython mapped function
        **kwargs : kw args passed to how function
        """
        # Deprecated alias replacement (e.g. np.sum -> "sum") emits a warning.
        orig_how = how
        how = com.get_cython_func(how) or how
        if orig_how != how:
            warn_alias_replacement(self, orig_how, how)
        ax = self.ax

        # Excludes `on` column when provided
        obj = self._obj_with_exclusions

        if not len(ax):
            # reset to the new freq
            obj = obj.copy()
            obj.index = obj.index._with_freq(self.freq)
            assert obj.index.freq == self.freq, (obj.index.freq, self.freq)
            return obj

        # do we have a regular frequency

        # error: Item "None" of "Optional[Any]" has no attribute "binlabels"
        if (
            (ax.freq is not None or ax.inferred_freq is not None)
            and len(self._grouper.binlabels) > len(ax)
            and how is None
        ):
            # more bins than data points and no aggregation requested:
            # let's do an asfreq
            return self.asfreq()

        # we are downsampling
        # we want to call the actual grouper method here
        if self.axis == 0:
            result = obj.groupby(self._grouper).aggregate(how, **kwargs)
        else:
            # test_resample_axis1
            # transpose so the grouper applies along axis 0, then undo it
            result = obj.T.groupby(self._grouper).aggregate(how, **kwargs).T

        return self._wrap_result(result)

    def _adjust_binner_for_upsample(self, binner):
        """
        Adjust our binner when upsampling.

        The range of a new index should not be outside specified range
        """
        # Drop the edge bin that falls outside the closed side of the range.
        if self.closed == "right":
            binner = binner[1:]
        else:
            binner = binner[:-1]
        return binner

    def _upsample(self, method, limit: int | None = None, fill_value=None):
        """
        Parameters
        ----------
        method : string {'backfill', 'bfill', 'pad',
            'ffill', 'asfreq'} method for upsampling
        limit : int, default None
            Maximum size gap to fill when reindexing
        fill_value : scalar, default None
            Value to use for missing values

        See Also
        --------
        .fillna: Fill NA/NaN values using the specified method.

        """
        if self.axis:
            raise AssertionError("axis must be 0")
        if self._from_selection:
            raise ValueError(
                "Upsampling from level= or on= selection "
                "is not supported, use .set_index(...) "
                "to explicitly set index to datetime-like"
            )

        ax = self.ax
        obj = self._selected_obj
        binner = self.binner
        res_index = self._adjust_binner_for_upsample(binner)

        # if we have the same frequency as our axis, then we are equal sampling
        if (
            limit is None
            and to_offset(ax.inferred_freq) == self.freq
            and len(obj) == len(res_index)
        ):
            # equal sampling: reuse the data, just swap in the new index
            result = obj.copy()
            result.index = res_index
        else:
            if method == "asfreq":
                # asfreq means plain reindex without filling
                method = None
            result = obj.reindex(
                res_index, method=method, limit=limit, fill_value=fill_value
            )

        return self._wrap_result(result)

    def _wrap_result(self, result):
        result = super()._wrap_result(result)

        # we may have a different kind that we were asked originally
        # convert if needed
        if self.kind == "period" and not isinstance(result.index, PeriodIndex):
            if isinstance(result.index, MultiIndex):
                # GH 24103 - e.g. groupby resample
                # only the innermost (time) level needs period conversion
                if not isinstance(result.index.levels[-1], PeriodIndex):
                    new_level = result.index.levels[-1].to_period(self.freq)
                    result.index = result.index.set_levels(new_level, level=-1)
            else:
                result.index = result.index.to_period(self.freq)
        return result
|
| 1862 |
+
|
| 1863 |
+
|
| 1864 |
+
# error: Definition of "ax" in base class "_GroupByMixin" is incompatible
# with definition in base class "DatetimeIndexResampler"
class DatetimeIndexResamplerGroupby(  # type: ignore[misc]
    _GroupByMixin, DatetimeIndexResampler
):
    """
    Provides a resample of a groupby implementation
    """

    @property
    def _resampler_cls(self):
        # Non-grouped resampler class applied per group by _GroupByMixin._apply.
        return DatetimeIndexResampler
|
| 1876 |
+
|
| 1877 |
+
|
| 1878 |
+
class PeriodIndexResampler(DatetimeIndexResampler):
    """Resampler for objects indexed by a PeriodIndex (deprecated path)."""

    # error: Incompatible types in assignment (expression has type "PeriodIndex", base
    # class "DatetimeIndexResampler" defined the type as "DatetimeIndex")
    ax: PeriodIndex  # type: ignore[assignment]

    @property
    def _resampler_for_grouping(self):
        # GH#53481-adjacent deprecation: period-indexed groupby resampling
        # is being phased out in favor of DatetimeIndex.
        warnings.warn(
            "Resampling a groupby with a PeriodIndex is deprecated. "
            "Cast to DatetimeIndex before resampling instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return PeriodIndexResamplerGroupby

    def _get_binner_for_time(self):
        if self.kind == "timestamp":
            return super()._get_binner_for_time()
        return self._timegrouper._get_period_bins(self.ax)

    def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
        obj = super()._convert_obj(obj)

        if self._from_selection:
            # see GH 14008, GH 12871
            msg = (
                "Resampling from level= or on= selection "
                "with a PeriodIndex is not currently supported, "
                "use .set_index(...) to explicitly set index"
            )
            raise NotImplementedError(msg)

        # convert to timestamp
        if self.kind == "timestamp":
            obj = obj.to_timestamp(how=self.convention)

        return obj

    def _downsample(self, how, **kwargs):
        """
        Downsample the cython defined function.

        Parameters
        ----------
        how : string / cython mapped function
        **kwargs : kw args passed to how function
        """
        # we may need to actually resample as if we are timestamps
        if self.kind == "timestamp":
            return super()._downsample(how, **kwargs)

        # Deprecated alias replacement (e.g. np.sum -> "sum") emits a warning.
        orig_how = how
        how = com.get_cython_func(how) or how
        if orig_how != how:
            warn_alias_replacement(self, orig_how, how)
        ax = self.ax

        if is_subperiod(ax.freq, self.freq):
            # Downsampling
            return self._groupby_and_aggregate(how, **kwargs)
        elif is_superperiod(ax.freq, self.freq):
            if how == "ohlc":
                # GH #13083
                # upsampling to subperiods is handled as an asfreq, which works
                # for pure aggregating/reducing methods
                # OHLC reduces along the time dimension, but creates multiple
                # values for each period -> handle by _groupby_and_aggregate()
                return self._groupby_and_aggregate(how)
            return self.asfreq()
        elif ax.freq == self.freq:
            return self.asfreq()

        raise IncompatibleFrequency(
            f"Frequency {ax.freq} cannot be resampled to {self.freq}, "
            "as they are not sub or super periods"
        )

    def _upsample(self, method, limit: int | None = None, fill_value=None):
        """
        Parameters
        ----------
        method : {'backfill', 'bfill', 'pad', 'ffill'}
            Method for upsampling.
        limit : int, default None
            Maximum size gap to fill when reindexing.
        fill_value : scalar, default None
            Value to use for missing values.

        See Also
        --------
        .fillna: Fill NA/NaN values using the specified method.

        """
        # we may need to actually resample as if we are timestamps
        if self.kind == "timestamp":
            return super()._upsample(method, limit=limit, fill_value=fill_value)

        ax = self.ax
        obj = self.obj
        new_index = self.binner

        # Start vs. end of period
        memb = ax.asfreq(self.freq, how=self.convention)

        # Get the fill indexer
        if method == "asfreq":
            # asfreq means plain reindex without filling
            method = None
        indexer = memb.get_indexer(new_index, method=method, limit=limit)
        new_obj = _take_new_index(
            obj,
            indexer,
            new_index,
            axis=self.axis,
        )
        return self._wrap_result(new_obj)
|
| 1993 |
+
|
| 1994 |
+
|
| 1995 |
+
# error: Definition of "ax" in base class "_GroupByMixin" is incompatible with
# definition in base class "PeriodIndexResampler"
class PeriodIndexResamplerGroupby(  # type: ignore[misc]
    _GroupByMixin, PeriodIndexResampler
):
    """
    Provides a resample of a groupby implementation.
    """

    @property
    def _resampler_cls(self):
        # Non-grouped resampler class applied per group by _GroupByMixin._apply.
        return PeriodIndexResampler
|
| 2007 |
+
|
| 2008 |
+
|
| 2009 |
+
class TimedeltaIndexResampler(DatetimeIndexResampler):
    """Resampler for objects indexed by a TimedeltaIndex."""

    # error: Incompatible types in assignment (expression has type "TimedeltaIndex",
    # base class "DatetimeIndexResampler" defined the type as "DatetimeIndex")
    ax: TimedeltaIndex  # type: ignore[assignment]

    @property
    def _resampler_for_grouping(self):
        return TimedeltaIndexResamplerGroupby

    def _get_binner_for_time(self):
        # Timedelta bins come straight from the time grouper.
        return self._timegrouper._get_time_delta_bins(self.ax)

    def _adjust_binner_for_upsample(self, binner):
        """
        Adjust our binner when upsampling.

        The range of a new index is allowed to be greater than original range
        so we don't need to change the length of a binner, GH 13022
        """
        return binner
|
| 2029 |
+
|
| 2030 |
+
|
| 2031 |
+
# error: Definition of "ax" in base class "_GroupByMixin" is incompatible with
# definition in base class "DatetimeIndexResampler"
class TimedeltaIndexResamplerGroupby(  # type: ignore[misc]
    _GroupByMixin, TimedeltaIndexResampler
):
    """
    Provides a resample of a groupby implementation.
    """

    @property
    def _resampler_cls(self):
        # Non-grouped resampler class applied per group by _GroupByMixin._apply.
        return TimedeltaIndexResampler
|
| 2043 |
+
|
| 2044 |
+
|
| 2045 |
+
def get_resampler(obj: Series | DataFrame, kind=None, **kwds) -> Resampler:
    """
    Create a TimeGrouper and return our resampler.
    """
    # TimeGrouper picks the concrete resampler class from the object's index.
    timegrouper = TimeGrouper(obj, **kwds)  # type: ignore[arg-type]
    return timegrouper._get_resampler(obj, kind=kind)


get_resampler.__doc__ = Resampler.__doc__
|
| 2054 |
+
|
| 2055 |
+
|
| 2056 |
+
def get_resampler_for_grouping(
    groupby: GroupBy,
    rule,
    how=None,
    fill_method=None,
    limit: int | None = None,
    kind=None,
    on=None,
    include_groups: bool = True,
    **kwargs,
) -> Resampler:
    """
    Return our appropriate resampler when grouping as well.
    """
    # NOTE(review): ``how``, ``fill_method`` and ``limit`` are accepted for
    # API compatibility but are not used in this function.
    # .resample uses 'on' similar to how .groupby uses 'key'
    timegrouper = TimeGrouper(freq=rule, key=on, **kwargs)
    plain_resampler = timegrouper._get_resampler(groupby.obj, kind=kind)
    return plain_resampler._get_resampler_for_grouping(
        groupby=groupby,
        include_groups=include_groups,
        key=timegrouper.key,
    )
|
| 2076 |
+
|
| 2077 |
+
|
| 2078 |
+
class TimeGrouper(Grouper):
|
| 2079 |
+
"""
|
| 2080 |
+
Custom groupby class for time-interval grouping.
|
| 2081 |
+
|
| 2082 |
+
Parameters
|
| 2083 |
+
----------
|
| 2084 |
+
freq : pandas date offset or offset alias for identifying bin edges
|
| 2085 |
+
closed : closed end of interval; 'left' or 'right'
|
| 2086 |
+
label : interval boundary to use for labeling; 'left' or 'right'
|
| 2087 |
+
convention : {'start', 'end', 'e', 's'}
|
| 2088 |
+
If axis is PeriodIndex
|
| 2089 |
+
"""
|
| 2090 |
+
|
| 2091 |
+
_attributes = Grouper._attributes + (
|
| 2092 |
+
"closed",
|
| 2093 |
+
"label",
|
| 2094 |
+
"how",
|
| 2095 |
+
"kind",
|
| 2096 |
+
"convention",
|
| 2097 |
+
"origin",
|
| 2098 |
+
"offset",
|
| 2099 |
+
)
|
| 2100 |
+
|
| 2101 |
+
origin: TimeGrouperOrigin
|
| 2102 |
+
|
| 2103 |
+
def __init__(
|
| 2104 |
+
self,
|
| 2105 |
+
obj: Grouper | None = None,
|
| 2106 |
+
freq: Frequency = "Min",
|
| 2107 |
+
key: str | None = None,
|
| 2108 |
+
closed: Literal["left", "right"] | None = None,
|
| 2109 |
+
label: Literal["left", "right"] | None = None,
|
| 2110 |
+
how: str = "mean",
|
| 2111 |
+
axis: Axis = 0,
|
| 2112 |
+
fill_method=None,
|
| 2113 |
+
limit: int | None = None,
|
| 2114 |
+
kind: str | None = None,
|
| 2115 |
+
convention: Literal["start", "end", "e", "s"] | None = None,
|
| 2116 |
+
origin: Literal["epoch", "start", "start_day", "end", "end_day"]
|
| 2117 |
+
| TimestampConvertibleTypes = "start_day",
|
| 2118 |
+
offset: TimedeltaConvertibleTypes | None = None,
|
| 2119 |
+
group_keys: bool = False,
|
| 2120 |
+
**kwargs,
|
| 2121 |
+
) -> None:
|
| 2122 |
+
# Check for correctness of the keyword arguments which would
|
| 2123 |
+
# otherwise silently use the default if misspelled
|
| 2124 |
+
if label not in {None, "left", "right"}:
|
| 2125 |
+
raise ValueError(f"Unsupported value {label} for `label`")
|
| 2126 |
+
if closed not in {None, "left", "right"}:
|
| 2127 |
+
raise ValueError(f"Unsupported value {closed} for `closed`")
|
| 2128 |
+
if convention not in {None, "start", "end", "e", "s"}:
|
| 2129 |
+
raise ValueError(f"Unsupported value {convention} for `convention`")
|
| 2130 |
+
|
| 2131 |
+
if (
|
| 2132 |
+
key is None
|
| 2133 |
+
and obj is not None
|
| 2134 |
+
and isinstance(obj.index, PeriodIndex) # type: ignore[attr-defined]
|
| 2135 |
+
or (
|
| 2136 |
+
key is not None
|
| 2137 |
+
and obj is not None
|
| 2138 |
+
and getattr(obj[key], "dtype", None) == "period" # type: ignore[index]
|
| 2139 |
+
)
|
| 2140 |
+
):
|
| 2141 |
+
freq = to_offset(freq, is_period=True)
|
| 2142 |
+
else:
|
| 2143 |
+
freq = to_offset(freq)
|
| 2144 |
+
|
| 2145 |
+
end_types = {"ME", "YE", "QE", "BME", "BYE", "BQE", "W"}
|
| 2146 |
+
rule = freq.rule_code
|
| 2147 |
+
if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types):
|
| 2148 |
+
if closed is None:
|
| 2149 |
+
closed = "right"
|
| 2150 |
+
if label is None:
|
| 2151 |
+
label = "right"
|
| 2152 |
+
else:
|
| 2153 |
+
# The backward resample sets ``closed`` to ``'right'`` by default
|
| 2154 |
+
# since the last value should be considered as the edge point for
|
| 2155 |
+
# the last bin. When origin in "end" or "end_day", the value for a
|
| 2156 |
+
# specific ``Timestamp`` index stands for the resample result from
|
| 2157 |
+
# the current ``Timestamp`` minus ``freq`` to the current
|
| 2158 |
+
# ``Timestamp`` with a right close.
|
| 2159 |
+
if origin in ["end", "end_day"]:
|
| 2160 |
+
if closed is None:
|
| 2161 |
+
closed = "right"
|
| 2162 |
+
if label is None:
|
| 2163 |
+
label = "right"
|
| 2164 |
+
else:
|
| 2165 |
+
if closed is None:
|
| 2166 |
+
closed = "left"
|
| 2167 |
+
if label is None:
|
| 2168 |
+
label = "left"
|
| 2169 |
+
|
| 2170 |
+
self.closed = closed
|
| 2171 |
+
self.label = label
|
| 2172 |
+
self.kind = kind
|
| 2173 |
+
self.convention = convention if convention is not None else "e"
|
| 2174 |
+
self.how = how
|
| 2175 |
+
self.fill_method = fill_method
|
| 2176 |
+
self.limit = limit
|
| 2177 |
+
self.group_keys = group_keys
|
| 2178 |
+
self._arrow_dtype: ArrowDtype | None = None
|
| 2179 |
+
|
| 2180 |
+
if origin in ("epoch", "start", "start_day", "end", "end_day"):
|
| 2181 |
+
# error: Incompatible types in assignment (expression has type "Union[Union[
|
| 2182 |
+
# Timestamp, datetime, datetime64, signedinteger[_64Bit], float, str],
|
| 2183 |
+
# Literal['epoch', 'start', 'start_day', 'end', 'end_day']]", variable has
|
| 2184 |
+
# type "Union[Timestamp, Literal['epoch', 'start', 'start_day', 'end',
|
| 2185 |
+
# 'end_day']]")
|
| 2186 |
+
self.origin = origin # type: ignore[assignment]
|
| 2187 |
+
else:
|
| 2188 |
+
try:
|
| 2189 |
+
self.origin = Timestamp(origin)
|
| 2190 |
+
except (ValueError, TypeError) as err:
|
| 2191 |
+
raise ValueError(
|
| 2192 |
+
"'origin' should be equal to 'epoch', 'start', 'start_day', "
|
| 2193 |
+
"'end', 'end_day' or "
|
| 2194 |
+
f"should be a Timestamp convertible type. Got '{origin}' instead."
|
| 2195 |
+
) from err
|
| 2196 |
+
|
| 2197 |
+
try:
|
| 2198 |
+
self.offset = Timedelta(offset) if offset is not None else None
|
| 2199 |
+
except (ValueError, TypeError) as err:
|
| 2200 |
+
raise ValueError(
|
| 2201 |
+
"'offset' should be a Timedelta convertible type. "
|
| 2202 |
+
f"Got '{offset}' instead."
|
| 2203 |
+
) from err
|
| 2204 |
+
|
| 2205 |
+
# always sort time groupers
|
| 2206 |
+
kwargs["sort"] = True
|
| 2207 |
+
|
| 2208 |
+
super().__init__(freq=freq, key=key, axis=axis, **kwargs)
|
| 2209 |
+
|
| 2210 |
+
def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
    """
    Return my resampler or raise if we have an invalid axis.

    Parameters
    ----------
    obj : Series or DataFrame
    kind : string, optional
        'period','timestamp','timedelta' are valid

    Returns
    -------
    Resampler

    Raises
    ------
    TypeError if incompatible axis

    """
    # Resolve the axis to group on (possibly converting Arrow-backed dtypes).
    _, ax, _ = self._set_grouper(obj, gpr_index=None)
    if isinstance(ax, DatetimeIndex):
        return DatetimeIndexResampler(
            obj,
            timegrouper=self,
            kind=kind,
            axis=self.axis,
            group_keys=self.group_keys,
            gpr_index=ax,
        )
    elif isinstance(ax, PeriodIndex) or kind == "period":
        # Both the PeriodIndex path and kind='period' are deprecated; warn
        # with a message specific to how the user got here.
        if isinstance(ax, PeriodIndex):
            # GH#53481
            warnings.warn(
                "Resampling with a PeriodIndex is deprecated. "
                "Cast index to DatetimeIndex before resampling instead.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        else:
            warnings.warn(
                "Resampling with kind='period' is deprecated. "
                "Use datetime paths instead.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        return PeriodIndexResampler(
            obj,
            timegrouper=self,
            kind=kind,
            axis=self.axis,
            group_keys=self.group_keys,
            gpr_index=ax,
        )
    elif isinstance(ax, TimedeltaIndex):
        return TimedeltaIndexResampler(
            obj,
            timegrouper=self,
            axis=self.axis,
            group_keys=self.group_keys,
            gpr_index=ax,
        )

    raise TypeError(
        "Only valid with DatetimeIndex, "
        "TimedeltaIndex or PeriodIndex, "
        f"but got an instance of '{type(ax).__name__}'"
    )
|
| 2277 |
+
|
| 2278 |
+
def _get_grouper(
    self, obj: NDFrameT, validate: bool = True
) -> tuple[BinGrouper, NDFrameT]:
    """
    Return a (BinGrouper, obj) pair for this grouper.

    Builds the appropriate resampler for ``obj`` and exposes its internal
    grouper; ``validate`` is accepted for interface compatibility and is
    not used here.
    """
    # create the resampler and return our binner
    r = self._get_resampler(obj)
    return r._grouper, cast(NDFrameT, r.obj)
|
| 2284 |
+
|
| 2285 |
+
def _get_time_bins(self, ax: DatetimeIndex):
    """
    Compute bin edges, bin assignments and labels for a DatetimeIndex.

    Returns
    -------
    binner : DatetimeIndex
        The bin edge index.
    bins : ndarray[intp] (or empty list for an empty axis)
        For each bin, the position in ``ax`` one past its last member.
    labels : DatetimeIndex
        The label for each bin (left or right edge per ``self.label``).

    Raises
    ------
    TypeError
        If ``ax`` is not a DatetimeIndex.
    """
    if not isinstance(ax, DatetimeIndex):
        raise TypeError(
            "axis must be a DatetimeIndex, but got "
            f"an instance of {type(ax).__name__}"
        )

    if len(ax) == 0:
        # Empty axis: preserve dtype/name but produce no bins.
        binner = labels = DatetimeIndex(
            data=[], freq=self.freq, name=ax.name, dtype=ax.dtype
        )
        return binner, [], labels

    first, last = _get_timestamp_range_edges(
        ax.min(),
        ax.max(),
        self.freq,
        unit=ax.unit,
        closed=self.closed,
        origin=self.origin,
        offset=self.offset,
    )
    # GH #12037
    # use first/last directly instead of call replace() on them
    # because replace() will swallow the nanosecond part
    # thus last bin maybe slightly before the end if the end contains
    # nanosecond part and lead to `Values falls after last bin` error
    # GH 25758: If DST lands at midnight (e.g. 'America/Havana'), user feedback
    # has noted that ambiguous=True provides the most sensible result
    binner = labels = date_range(
        freq=self.freq,
        start=first,
        end=last,
        tz=ax.tz,
        name=ax.name,
        ambiguous=True,
        nonexistent="shift_forward",
        unit=ax.unit,
    )

    ax_values = ax.asi8
    binner, bin_edges = self._adjust_bin_edges(binner, ax_values)

    # general version, knowing nothing about relative frequencies
    bins = lib.generate_bins_dt64(
        ax_values, bin_edges, self.closed, hasnans=ax.hasnans
    )

    # Choose labels: for a right-closed or right-labelled grouping the first
    # edge is not a label, so drop it.
    if self.closed == "right":
        labels = binner
        if self.label == "right":
            labels = labels[1:]
    elif self.label == "right":
        labels = labels[1:]

    if ax.hasnans:
        # NaT values form their own leading bin.
        binner = binner.insert(0, NaT)
        labels = labels.insert(0, NaT)

    # if we end up with more labels than bins
    # adjust the labels
    # GH4076
    if len(bins) < len(labels):
        labels = labels[: len(bins)]

    return binner, bins, labels
|
| 2351 |
+
|
| 2352 |
+
def _adjust_bin_edges(
    self, binner: DatetimeIndex, ax_values: npt.NDArray[np.int64]
) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]:
    """
    Adjust right-closed super-daily bin edges to the end of the day.

    Returns the (possibly trimmed) ``binner`` and the integer (i8) bin
    edges derived from it.
    """
    # Some hacks for > daily data, see #1471, #1458, #1483

    if self.freq.name in ("BME", "ME", "W") or self.freq.name.split("-")[0] in (
        "BQE",
        "BYE",
        "QE",
        "YE",
        "W",
    ):
        # If the right end-point is on the last day of the month, roll forwards
        # until the last moment of that day. Note that we only do this for offsets
        # which correspond to the end of a super-daily period - "month start", for
        # example, is excluded.
        if self.closed == "right":
            # GH 21459, GH 9119: Adjust the bins relative to the wall time
            edges_dti = binner.tz_localize(None)
            # Shift each edge to the last representable instant of its day:
            # +1 day, then back one unit (e.g. one nanosecond).
            edges_dti = (
                edges_dti
                + Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
                - Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
            )
            bin_edges = edges_dti.tz_localize(binner.tz).asi8
        else:
            bin_edges = binner.asi8

        # intraday values on last day
        if bin_edges[-2] > ax_values.max():
            # Last bin would be empty; drop it.
            bin_edges = bin_edges[:-1]
            binner = binner[:-1]
    else:
        bin_edges = binner.asi8
    return binner, bin_edges
|
| 2387 |
+
|
| 2388 |
+
def _get_time_delta_bins(self, ax: TimedeltaIndex):
    """
    Compute bin edges, bin assignments and labels for a TimedeltaIndex.

    Raises
    ------
    TypeError
        If ``ax`` is not a TimedeltaIndex.
    ValueError
        If ``self.freq`` is not a fixed-duration (Tick) frequency.
    """
    if not isinstance(ax, TimedeltaIndex):
        raise TypeError(
            "axis must be a TimedeltaIndex, but got "
            f"an instance of {type(ax).__name__}"
        )

    if not isinstance(self.freq, Tick):
        # GH#51896
        raise ValueError(
            "Resampling on a TimedeltaIndex requires fixed-duration `freq`, "
            f"e.g. '24h' or '3D', not {self.freq}"
        )

    if not len(ax):
        # Empty axis: no bins.
        binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
        return binner, [], labels

    start, end = ax.min(), ax.max()

    if self.closed == "right":
        # Extend the range so the last value falls inside a right-closed bin.
        end += self.freq

    labels = binner = timedelta_range(
        start=start, end=end, freq=self.freq, name=ax.name
    )

    end_stamps = labels
    if self.closed == "left":
        # For left-closed bins the searchsorted boundary is the next edge.
        end_stamps += self.freq

    bins = ax.searchsorted(end_stamps, side=self.closed)

    if self.offset:
        # GH 10530 & 31809
        labels += self.offset

    return binner, bins, labels
|
| 2426 |
+
|
| 2427 |
+
def _get_time_period_bins(self, ax: DatetimeIndex):
    """
    Compute period bins for a DatetimeIndex (datetime -> period resampling).

    Raises
    ------
    TypeError
        If ``ax`` is not a DatetimeIndex.
    """
    if not isinstance(ax, DatetimeIndex):
        raise TypeError(
            "axis must be a DatetimeIndex, but got "
            f"an instance of {type(ax).__name__}"
        )

    freq = self.freq

    if len(ax) == 0:
        # Empty axis: no bins.
        binner = labels = PeriodIndex(
            data=[], freq=freq, name=ax.name, dtype=ax.dtype
        )
        return binner, [], labels

    labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)

    # The end of each period (start of the next period) as a timestamp,
    # used to assign each axis value to its bin.
    end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
    if ax.tz:
        end_stamps = end_stamps.tz_localize(ax.tz)
    bins = ax.searchsorted(end_stamps, side="left")

    return binner, bins, labels
|
| 2450 |
+
|
| 2451 |
+
def _get_period_bins(self, ax: PeriodIndex):
    """
    Compute bin edges, bin assignments and labels for a PeriodIndex.

    Handles NaT values (collected into a leading NaT bin), the resampling
    convention ('s'/'e'), and origin/offset adjustment for Tick frequencies.

    Raises
    ------
    TypeError
        If ``ax`` is not a PeriodIndex.
    """
    if not isinstance(ax, PeriodIndex):
        raise TypeError(
            "axis must be a PeriodIndex, but got "
            f"an instance of {type(ax).__name__}"
        )

    memb = ax.asfreq(self.freq, how=self.convention)

    # NaT handling as in pandas._lib.lib.generate_bins_dt64()
    nat_count = 0
    if memb.hasnans:
        # error: Incompatible types in assignment (expression has type
        # "bool_", variable has type "int") [assignment]
        nat_count = np.sum(memb._isnan)  # type: ignore[assignment]
        memb = memb[~memb._isnan]

    if not len(memb):
        # index contains no valid (non-NaT) values
        bins = np.array([], dtype=np.int64)
        binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
        if len(ax) > 0:
            # index is all NaT
            binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax))
        return binner, bins, labels

    freq_mult = self.freq.n

    start = ax.min().asfreq(self.freq, how=self.convention)
    end = ax.max().asfreq(self.freq, how="end")
    bin_shift = 0

    if isinstance(self.freq, Tick):
        # GH 23882 & 31809: get adjusted bin edge labels with 'origin'
        # and 'origin' support. This call only makes sense if the freq is a
        # Tick since offset and origin are only used in those cases.
        # Not doing this check could create an extra empty bin.
        p_start, end = _get_period_range_edges(
            start,
            end,
            self.freq,
            closed=self.closed,
            origin=self.origin,
            offset=self.offset,
        )

        # Get offset for bin edge (not label edge) adjustment
        start_offset = Period(start, self.freq) - Period(p_start, self.freq)
        # error: Item "Period" of "Union[Period, Any]" has no attribute "n"
        bin_shift = start_offset.n % freq_mult  # type: ignore[union-attr]
        start = p_start

    labels = binner = period_range(
        start=start, end=end, freq=self.freq, name=ax.name
    )

    i8 = memb.asi8

    # when upsampling to subperiods, we need to generate enough bins
    expected_bins_count = len(binner) * freq_mult
    i8_extend = expected_bins_count - (i8[-1] - i8[0])
    rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)
    rng += freq_mult
    # adjust bin edge indexes to account for base
    rng -= bin_shift

    # Wrap in PeriodArray for PeriodArray.searchsorted
    prng = type(memb._data)(rng, dtype=memb.dtype)
    bins = memb.searchsorted(prng, side="left")

    if nat_count > 0:
        binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count)

    return binner, bins, labels
|
| 2525 |
+
|
| 2526 |
+
def _set_grouper(
    self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
    """
    Resolve the grouping axis via the parent class, converting
    Arrow-backed datetime/timedelta dtypes to their NumPy equivalents.

    The original ArrowDtype is remembered in ``self._arrow_dtype`` so the
    result can be cast back after resampling.
    """
    obj, ax, indexer = super()._set_grouper(obj, sort, gpr_index=gpr_index)
    # 'M'/'m' dtype kinds are datetime64/timedelta64 respectively.
    if isinstance(ax.dtype, ArrowDtype) and ax.dtype.kind in "Mm":
        self._arrow_dtype = ax.dtype
        ax = Index(
            cast(ArrowExtensionArray, ax.array)._maybe_convert_datelike_array()
        )
    return obj, ax, indexer
|
| 2536 |
+
|
| 2537 |
+
|
| 2538 |
+
def _take_new_index(
|
| 2539 |
+
obj: NDFrameT, indexer: npt.NDArray[np.intp], new_index: Index, axis: AxisInt = 0
|
| 2540 |
+
) -> NDFrameT:
|
| 2541 |
+
if isinstance(obj, ABCSeries):
|
| 2542 |
+
new_values = algos.take_nd(obj._values, indexer)
|
| 2543 |
+
# error: Incompatible return value type (got "Series", expected "NDFrameT")
|
| 2544 |
+
return obj._constructor( # type: ignore[return-value]
|
| 2545 |
+
new_values, index=new_index, name=obj.name
|
| 2546 |
+
)
|
| 2547 |
+
elif isinstance(obj, ABCDataFrame):
|
| 2548 |
+
if axis == 1:
|
| 2549 |
+
raise NotImplementedError("axis 1 is not supported")
|
| 2550 |
+
new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)
|
| 2551 |
+
# error: Incompatible return value type (got "DataFrame", expected "NDFrameT")
|
| 2552 |
+
return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes) # type: ignore[return-value]
|
| 2553 |
+
else:
|
| 2554 |
+
raise ValueError("'obj' should be either a Series or a DataFrame")
|
| 2555 |
+
|
| 2556 |
+
|
| 2557 |
+
def _get_timestamp_range_edges(
    first: Timestamp,
    last: Timestamp,
    freq: BaseOffset,
    unit: str,
    closed: Literal["right", "left"] = "left",
    origin: TimeGrouperOrigin = "start_day",
    offset: Timedelta | None = None,
) -> tuple[Timestamp, Timestamp]:
    """
    Adjust the `first` Timestamp to the preceding Timestamp that resides on
    the provided offset. Adjust the `last` Timestamp to the following
    Timestamp that resides on the provided offset. Input Timestamps that
    already reside on the offset will be adjusted depending on the type of
    offset and the `closed` parameter.

    Parameters
    ----------
    first : pd.Timestamp
        The beginning Timestamp of the range to be adjusted.
    last : pd.Timestamp
        The ending Timestamp of the range to be adjusted.
    freq : pd.DateOffset
        The dateoffset to which the Timestamps will be adjusted.
    unit : str
        Time resolution (e.g. 'ns') used for the adjustment arithmetic.
    closed : {'right', 'left'}, default "left"
        Which side of bin interval is closed.
    origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'
        The timestamp on which to adjust the grouping. The timezone of origin must
        match the timezone of the index.
        If a timestamp is not used, these values are also supported:

        - 'epoch': `origin` is 1970-01-01
        - 'start': `origin` is the first value of the timeseries
        - 'start_day': `origin` is the first day at midnight of the timeseries
    offset : pd.Timedelta, default is None
        An offset timedelta added to the origin.

    Returns
    -------
    A tuple of length 2, containing the adjusted pd.Timestamp objects.
    """
    if isinstance(freq, Tick):
        index_tz = first.tz
        if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):
            raise ValueError("The origin must have the same timezone as the index.")
        if origin == "epoch":
            # set the epoch based on the timezone to have similar bins results when
            # resampling on the same kind of indexes on different timezones
            origin = Timestamp("1970-01-01", tz=index_tz)

        if isinstance(freq, Day):
            # _adjust_dates_anchored assumes 'D' means 24h, but first/last
            # might contain a DST transition (23h, 24h, or 25h).
            # So "pretend" the dates are naive when adjusting the endpoints
            first = first.tz_localize(None)
            last = last.tz_localize(None)
            if isinstance(origin, Timestamp):
                origin = origin.tz_localize(None)

        first, last = _adjust_dates_anchored(
            first, last, freq, closed=closed, origin=origin, offset=offset, unit=unit
        )
        if isinstance(freq, Day):
            # Restore the original timezone dropped above.
            first = first.tz_localize(index_tz)
            last = last.tz_localize(index_tz)
    else:
        # Non-fixed frequencies (e.g. month end): snap to day boundaries and
        # roll to the enclosing offset positions.
        first = first.normalize()
        last = last.normalize()

        if closed == "left":
            first = Timestamp(freq.rollback(first))
        else:
            first = Timestamp(first - freq)

        last = Timestamp(last + freq)

    return first, last
|
| 2634 |
+
|
| 2635 |
+
|
| 2636 |
+
def _get_period_range_edges(
    first: Period,
    last: Period,
    freq: BaseOffset,
    closed: Literal["right", "left"] = "left",
    origin: TimeGrouperOrigin = "start_day",
    offset: Timedelta | None = None,
) -> tuple[Period, Period]:
    """
    Adjust the provided `first` and `last` Periods to the respective Period of
    the given offset that encompasses them.

    Parameters
    ----------
    first : pd.Period
        The beginning Period of the range to be adjusted.
    last : pd.Period
        The ending Period of the range to be adjusted.
    freq : pd.DateOffset
        The freq to which the Periods will be adjusted.
    closed : {'right', 'left'}, default "left"
        Which side of bin interval is closed.
    origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day'
        The timestamp on which to adjust the grouping. The timezone of origin must
        match the timezone of the index.

        If a timestamp is not used, these values are also supported:

        - 'epoch': `origin` is 1970-01-01
        - 'start': `origin` is the first value of the timeseries
        - 'start_day': `origin` is the first day at midnight of the timeseries
    offset : pd.Timedelta, default is None
        An offset timedelta added to the origin.

    Returns
    -------
    A tuple of length 2, containing the adjusted pd.Period objects.

    Raises
    ------
    TypeError
        If `first` or `last` is not a Period.
    """
    if not all(isinstance(obj, Period) for obj in [first, last]):
        raise TypeError("'first' and 'last' must be instances of type Period")

    # GH 23882
    # Delegate the adjustment to the timestamp version, then convert back,
    # compensating for endpoints that already sit on the offset.
    first_ts = first.to_timestamp()
    last_ts = last.to_timestamp()
    adjust_first = not freq.is_on_offset(first_ts)
    adjust_last = freq.is_on_offset(last_ts)

    first_ts, last_ts = _get_timestamp_range_edges(
        first_ts, last_ts, freq, unit="ns", closed=closed, origin=origin, offset=offset
    )

    first = (first_ts + int(adjust_first) * freq).to_period(freq)
    last = (last_ts - int(adjust_last) * freq).to_period(freq)
    return first, last
|
| 2690 |
+
|
| 2691 |
+
|
| 2692 |
+
def _insert_nat_bin(
|
| 2693 |
+
binner: PeriodIndex, bins: np.ndarray, labels: PeriodIndex, nat_count: int
|
| 2694 |
+
) -> tuple[PeriodIndex, np.ndarray, PeriodIndex]:
|
| 2695 |
+
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
|
| 2696 |
+
# shift bins by the number of NaT
|
| 2697 |
+
assert nat_count > 0
|
| 2698 |
+
bins += nat_count
|
| 2699 |
+
bins = np.insert(bins, 0, nat_count)
|
| 2700 |
+
|
| 2701 |
+
# Incompatible types in assignment (expression has type "Index", variable
|
| 2702 |
+
# has type "PeriodIndex")
|
| 2703 |
+
binner = binner.insert(0, NaT) # type: ignore[assignment]
|
| 2704 |
+
# Incompatible types in assignment (expression has type "Index", variable
|
| 2705 |
+
# has type "PeriodIndex")
|
| 2706 |
+
labels = labels.insert(0, NaT) # type: ignore[assignment]
|
| 2707 |
+
return binner, bins, labels
|
| 2708 |
+
|
| 2709 |
+
|
| 2710 |
+
def _adjust_dates_anchored(
    first: Timestamp,
    last: Timestamp,
    freq: Tick,
    closed: Literal["right", "left"] = "right",
    origin: TimeGrouperOrigin = "start_day",
    offset: Timedelta | None = None,
    unit: str = "ns",
) -> tuple[Timestamp, Timestamp]:
    """
    Snap ``first`` and ``last`` outward onto the frequency grid anchored
    at ``origin`` (plus ``offset``).

    The arithmetic is done on integer epoch values in resolution ``unit``,
    in UTC when the inputs are tz-aware, so ambiguous/nonexistent local
    times are never constructed.

    Returns
    -------
    tuple[Timestamp, Timestamp]
        The adjusted (first, last) pair, in the original timezones.
    """
    # First and last offsets should be calculated from the start day to fix an
    # error cause by resampling across multiple days when a one day period is
    # not a multiple of the frequency. See GH 8683
    # To handle frequencies that are not multiple or divisible by a day we let
    # the possibility to define a fixed origin timestamp. See GH 31809
    first = first.as_unit(unit)
    last = last.as_unit(unit)
    if offset is not None:
        offset = offset.as_unit(unit)

    freq_value = Timedelta(freq).as_unit(unit)._value

    origin_timestamp = 0  # origin == "epoch"
    if origin == "start_day":
        origin_timestamp = first.normalize()._value
    elif origin == "start":
        origin_timestamp = first._value
    elif isinstance(origin, Timestamp):
        origin_timestamp = origin.as_unit(unit)._value
    elif origin in ["end", "end_day"]:
        # Anchor backwards from the (ceiling of the) last value.
        origin_last = last if origin == "end" else last.ceil("D")
        sub_freq_times = (origin_last._value - first._value) // freq_value
        if closed == "left":
            sub_freq_times += 1
        first = origin_last - sub_freq_times * freq
        origin_timestamp = first._value
    origin_timestamp += offset._value if offset else 0

    # GH 10117 & GH 19375. If first and last contain timezone information,
    # Perform the calculation in UTC in order to avoid localizing on an
    # Ambiguous or Nonexistent time.
    first_tzinfo = first.tzinfo
    last_tzinfo = last.tzinfo
    if first_tzinfo is not None:
        first = first.tz_convert("UTC")
    if last_tzinfo is not None:
        last = last.tz_convert("UTC")

    # Distance of each endpoint from the nearest grid point at or before it.
    foffset = (first._value - origin_timestamp) % freq_value
    loffset = (last._value - origin_timestamp) % freq_value

    if closed == "right":
        if foffset > 0:
            # roll back
            fresult_int = first._value - foffset
        else:
            fresult_int = first._value - freq_value

        if loffset > 0:
            # roll forward
            lresult_int = last._value + (freq_value - loffset)
        else:
            # already the end of the road
            lresult_int = last._value
    else:  # closed == 'left'
        if foffset > 0:
            fresult_int = first._value - foffset
        else:
            # start of the road
            fresult_int = first._value

        if loffset > 0:
            # roll forward
            lresult_int = last._value + (freq_value - loffset)
        else:
            lresult_int = last._value + freq_value
    fresult = Timestamp(fresult_int, unit=unit)
    lresult = Timestamp(lresult_int, unit=unit)
    if first_tzinfo is not None:
        fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo)
    if last_tzinfo is not None:
        lresult = lresult.tz_localize("UTC").tz_convert(last_tzinfo)
    return fresult, lresult
|
| 2792 |
+
|
| 2793 |
+
|
| 2794 |
+
def asfreq(
    obj: NDFrameT,
    freq,
    method=None,
    how=None,
    normalize: bool = False,
    fill_value=None,
) -> NDFrameT:
    """
    Utility frequency conversion method for Series/DataFrame.

    See :meth:`pandas.NDFrame.asfreq` for full documentation.
    """
    if isinstance(obj.index, PeriodIndex):
        # Period path: convert the index frequency directly; reindexing
        # methods do not apply.
        if method is not None:
            raise NotImplementedError("'method' argument is not supported")

        if how is None:
            how = "E"

        if isinstance(freq, BaseOffset):
            if hasattr(freq, "_period_dtype_code"):
                freq = freq_to_period_freqstr(freq.n, freq.name)
            else:
                raise ValueError(
                    f"Invalid offset: '{freq.base}' for converting time series "
                    f"with PeriodIndex."
                )

        new_obj = obj.copy()
        new_obj.index = obj.index.asfreq(freq, how=how)

    elif len(obj.index) == 0:
        # Empty index: just swap in an empty index with the new freq.
        new_obj = obj.copy()

        new_obj.index = _asfreq_compat(obj.index, freq)
    else:
        # Datetime-like path: reindex onto a regular date_range.
        unit = None
        if isinstance(obj.index, DatetimeIndex):
            # TODO: should we disallow non-DatetimeIndex?
            unit = obj.index.unit
        dti = date_range(obj.index.min(), obj.index.max(), freq=freq, unit=unit)
        dti.name = obj.index.name
        new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
        if normalize:
            new_obj.index = new_obj.index.normalize()

    return new_obj
|
| 2842 |
+
|
| 2843 |
+
|
| 2844 |
+
def _asfreq_compat(index: DatetimeIndex | PeriodIndex | TimedeltaIndex, freq):
|
| 2845 |
+
"""
|
| 2846 |
+
Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex.
|
| 2847 |
+
|
| 2848 |
+
Parameters
|
| 2849 |
+
----------
|
| 2850 |
+
index : PeriodIndex, DatetimeIndex, or TimedeltaIndex
|
| 2851 |
+
freq : DateOffset
|
| 2852 |
+
|
| 2853 |
+
Returns
|
| 2854 |
+
-------
|
| 2855 |
+
same type as index
|
| 2856 |
+
"""
|
| 2857 |
+
if len(index) != 0:
|
| 2858 |
+
# This should never be reached, always checked by the caller
|
| 2859 |
+
raise ValueError(
|
| 2860 |
+
"Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex"
|
| 2861 |
+
)
|
| 2862 |
+
new_index: Index
|
| 2863 |
+
if isinstance(index, PeriodIndex):
|
| 2864 |
+
new_index = index.asfreq(freq=freq)
|
| 2865 |
+
elif isinstance(index, DatetimeIndex):
|
| 2866 |
+
new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name)
|
| 2867 |
+
elif isinstance(index, TimedeltaIndex):
|
| 2868 |
+
new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name)
|
| 2869 |
+
else: # pragma: no cover
|
| 2870 |
+
raise TypeError(type(index))
|
| 2871 |
+
return new_index
|
| 2872 |
+
|
| 2873 |
+
|
| 2874 |
+
def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None:
    """
    Warn for deprecation of args and kwargs in resample functions.

    Parameters
    ----------
    cls : type
        Class to warn about.
    kernel : str
        Operation name.
    args : tuple or None
        args passed by user. Will be None if and only if kernel does not have args.
    kwargs : dict or None
        kwargs passed by user. Will be None if and only if kernel does not have kwargs.
    """
    has_args = args is not None and len(args) > 0
    has_kwargs = kwargs is not None and len(kwargs) > 0

    # Nothing extra was passed -> nothing to warn about.
    if not (has_args or has_kwargs):
        return

    if has_args and has_kwargs:
        msg = "args and kwargs"
    else:
        msg = "args" if has_args else "kwargs"

    warnings.warn(
        f"Passing additional {msg} to {cls.__name__}.{kernel} has "
        "no impact on the result and is deprecated. This will "
        "raise a TypeError in a future version of pandas.",
        category=FutureWarning,
        stacklevel=find_stack_level(),
    )
|
| 2906 |
+
|
| 2907 |
+
|
| 2908 |
+
def _apply(
    grouped: GroupBy, how: Callable, *args, include_groups: bool, **kwargs
) -> DataFrame:
    """
    Call ``grouped.apply(how, ...)`` while re-attributing the grouping-column
    deprecation warning to ``.resample`` instead of ``DataFrameGroupBy.apply``.
    """
    # GH#7155 - rewrite warning to appear as if it came from `.resample`
    target_message = "DataFrameGroupBy.apply operated on the grouping columns"
    new_message = _apply_groupings_depr.format("DataFrameGroupBy", "resample")
    with rewrite_warning(
        target_message=target_message,
        target_category=DeprecationWarning,
        new_message=new_message,
    ):
        result = grouped.apply(how, *args, include_groups=include_groups, **kwargs)
    return result
|
videollama2/lib/python3.10/site-packages/pandas/core/sample.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module containing utilities for NDFrame.sample() and .GroupBy.sample()
|
| 3 |
+
"""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from typing import TYPE_CHECKING
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from pandas._libs import lib
|
| 11 |
+
|
| 12 |
+
from pandas.core.dtypes.generic import (
|
| 13 |
+
ABCDataFrame,
|
| 14 |
+
ABCSeries,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from pandas._typing import AxisInt
|
| 19 |
+
|
| 20 |
+
from pandas.core.generic import NDFrame
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def preprocess_weights(obj: NDFrame, weights, axis: AxisInt) -> np.ndarray:
    """
    Process and validate the `weights` argument to `NDFrame.sample` and
    `.GroupBy.sample`.

    Returns `weights` as an ndarray[np.float64], validated except for normalizing
    weights (because that must be done groupwise in groupby sampling).

    Raises
    ------
    KeyError
        If a string ``weights`` is not a column of a DataFrame ``obj``.
    ValueError
        If a string is passed for a Series / non-row axis, the length does
        not match the sampled axis, or any weight is inf or negative.
    """
    # If a series, align with frame
    if isinstance(weights, ABCSeries):
        weights = weights.reindex(obj.axes[axis])

    # Strings acceptable if a dataframe and axis = 0
    if isinstance(weights, str):
        if isinstance(obj, ABCDataFrame):
            if axis == 0:
                try:
                    weights = obj[weights]
                except KeyError as err:
                    raise KeyError(
                        "String passed to weights not a valid column"
                    ) from err
            else:
                raise ValueError(
                    "Strings can only be passed to "
                    "weights when sampling from rows on "
                    "a DataFrame"
                )
        else:
            raise ValueError(
                "Strings cannot be passed as weights when sampling from a Series."
            )

    # Coerce to a float64 ndarray via the appropriate 1-D constructor.
    if isinstance(obj, ABCSeries):
        func = obj._constructor
    else:
        func = obj._constructor_sliced

    weights = func(weights, dtype="float64")._values

    if len(weights) != obj.shape[axis]:
        raise ValueError("Weights and axis to be sampled must be of same length")

    if lib.has_infs(weights):
        raise ValueError("weight vector may not include `inf` values")

    if (weights < 0).any():
        # BUG FIX: message previously read "many not" instead of "may not".
        raise ValueError("weight vector may not include negative values")

    missing = np.isnan(weights)
    if missing.any():
        # Don't modify weights in place
        weights = weights.copy()
        weights[missing] = 0
    return weights
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def process_sampling_size(
    n: int | None, frac: float | None, replace: bool
) -> int | None:
    """
    Process and validate the `n` and `frac` arguments to `NDFrame.sample` and
    `.GroupBy.sample`.

    Returns None if `frac` should be used (variable sampling sizes), otherwise
    returns the constant sampling size.
    """
    # Supplying both is ambiguous: reject before examining either value.
    if n is not None and frac is not None:
        raise ValueError("Please enter a value for `frac` OR `n`, not both")

    if frac is None:
        # Constant-size branch; with neither given, default to a single row.
        if n is None:
            return 1
        if n < 0:
            raise ValueError(
                "A negative number of rows requested. Please provide `n` >= 0."
            )
        if n % 1 != 0:
            raise ValueError("Only integers accepted as `n` values")
        return n

    # Fraction-based branch: validate `frac`, then signal "variable size"
    # to the caller by returning None.
    if frac > 1 and not replace:
        raise ValueError(
            "Replace has to be set to `True` when "
            "upsampling the population `frac` > 1."
        )
    if frac < 0:
        raise ValueError(
            "A negative number of rows requested. Please provide `frac` >= 0."
        )
    return None
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def sample(
    obj_len: int,
    size: int,
    replace: bool,
    weights: np.ndarray | None,
    random_state: np.random.RandomState | np.random.Generator,
) -> np.ndarray:
    """
    Randomly sample `size` indices in `np.arange(obj_len)`

    Parameters
    ----------
    obj_len : int
        The length of the indices being considered
    size : int
        The number of values to choose
    replace : bool
        Allow or disallow sampling of the same row more than once.
    weights : np.ndarray[np.float64] or None
        If None, equal probability weighting, otherwise weights according
        to the vector normalized
    random_state: np.random.RandomState or np.random.Generator
        State used for the random sampling

    Returns
    -------
    np.ndarray[np.intp]
    """
    probs = weights
    if probs is not None:
        total = probs.sum()
        if total == 0:
            raise ValueError("Invalid weights: weights sum to zero")
        # Normalize into a fresh array so the caller's vector is untouched.
        probs = probs / total

    chosen = random_state.choice(obj_len, size=size, replace=replace, p=probs)
    return chosen.astype(np.intp, copy=False)
|
videollama2/lib/python3.10/site-packages/pandas/core/shared_docs.py
ADDED
|
@@ -0,0 +1,952 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations

# Repository of docstring templates shared between Series, DataFrame and
# GroupBy methods.  Values contain either ``{}``-style or ``%()s``-style
# placeholders (e.g. ``{klass}``, ``%(caller)s``) that are substituted at
# method-definition time via ``doc``/``%`` formatting.
_shared_docs: dict[str, str] = {}

# Shared docstring for Series.aggregate / DataFrame.aggregate (and `agg`).
_shared_docs[
    "aggregate"
] = """
Aggregate using one or more operations over the specified axis.

Parameters
----------
func : function, str, list or dict
    Function to use for aggregating the data. If a function, must either
    work when passed a {klass} or when passed to {klass}.apply.

    Accepted combinations are:

    - function
    - string function name
    - list of functions and/or function names, e.g. ``[np.sum, 'mean']``
    - dict of axis labels -> functions, function names or list of such.
{axis}
*args
    Positional arguments to pass to `func`.
**kwargs
    Keyword arguments to pass to `func`.

Returns
-------
scalar, Series or DataFrame

    The return can be:

    * scalar : when Series.agg is called with single function
    * Series : when DataFrame.agg is called with a single function
    * DataFrame : when DataFrame.agg is called with several functions
{see_also}
Notes
-----
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.

`agg` is an alias for `aggregate`. Use the alias.

Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.

A passed user-defined-function will be passed a Series for evaluation.
{examples}"""

# Shared docstring for Series.compare / DataFrame.compare.
_shared_docs[
    "compare"
] = """
Compare to another {klass} and show the differences.

Parameters
----------
other : {klass}
    Object to compare with.

align_axis : {{0 or 'index', 1 or 'columns'}}, default 1
    Determine which axis to align the comparison on.

    * 0, or 'index' : Resulting differences are stacked vertically
        with rows drawn alternately from self and other.
    * 1, or 'columns' : Resulting differences are aligned horizontally
        with columns drawn alternately from self and other.

keep_shape : bool, default False
    If true, all rows and columns are kept.
    Otherwise, only the ones with different values are kept.

keep_equal : bool, default False
    If true, the result keeps values that are equal.
    Otherwise, equal values are shown as NaNs.

result_names : tuple, default ('self', 'other')
    Set the dataframes names in the comparison.

    .. versionadded:: 1.5.0
"""

# Shared docstring for Series.groupby / DataFrame.groupby
# (uses %-style substitution: %(klass)s).
_shared_docs[
    "groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.

A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.

Parameters
----------
by : mapping, function, label, pd.Grouper or list of such
    Used to determine the groups for the groupby.
    If ``by`` is a function, it's called on each value of the object's
    index. If a dict or Series is passed, the Series or dict VALUES
    will be used to determine the groups (the Series' values are first
    aligned; see ``.align()`` method). If a list or ndarray of length
    equal to the selected axis is passed (see the `groupby user guide
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#splitting-an-object-into-groups>`_),
    the values are used as-is to determine the groups. A label or list
    of labels may be passed to group by the columns in ``self``.
    Notice that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
    Split along rows (0) or columns (1). For `Series` this parameter
    is unused and defaults to 0.

    .. deprecated:: 2.1.0

        Will be removed and behave like axis=0 in a future version.
        For ``axis=1``, do ``frame.T.groupby(...)`` instead.

level : int, level name, or sequence of such, default None
    If the axis is a MultiIndex (hierarchical), group by a particular
    level or levels. Do not specify both ``by`` and ``level``.
as_index : bool, default True
    Return object with group labels as the
    index. Only relevant for DataFrame input. as_index=False is
    effectively "SQL-style" grouped output. This argument has no effect
    on filtrations (see the `filtrations in the user guide
    <https://pandas.pydata.org/docs/dev/user_guide/groupby.html#filtration>`_),
    such as ``head()``, ``tail()``, ``nth()`` and in transformations
    (see the `transformations in the user guide
    <https://pandas.pydata.org/docs/dev/user_guide/groupby.html#transformation>`_).
sort : bool, default True
    Sort group keys. Get better performance by turning this off.
    Note this does not influence the order of observations within each
    group. Groupby preserves the order of rows within each group. If False,
    the groups will appear in the same order as they did in the original DataFrame.
    This argument has no effect on filtrations (see the `filtrations in the user guide
    <https://pandas.pydata.org/docs/dev/user_guide/groupby.html#filtration>`_),
    such as ``head()``, ``tail()``, ``nth()`` and in transformations
    (see the `transformations in the user guide
    <https://pandas.pydata.org/docs/dev/user_guide/groupby.html#transformation>`_).

    .. versionchanged:: 2.0.0

        Specifying ``sort=False`` with an ordered categorical grouper will no
        longer sort the values.

group_keys : bool, default True
    When calling apply and the ``by`` argument produces a like-indexed
    (i.e. :ref:`a transform <groupby.transform>`) result, add group keys to
    index to identify pieces. By default group keys are not included
    when the result's index (and column) labels match the inputs, and
    are included otherwise.

    .. versionchanged:: 1.5.0

       Warns that ``group_keys`` will no longer be ignored when the
       result from ``apply`` is a like-indexed Series or DataFrame.
       Specify ``group_keys`` explicitly to include the group keys or
       not.

    .. versionchanged:: 2.0.0

       ``group_keys`` now defaults to ``True``.

observed : bool, default False
    This only applies if any of the groupers are Categoricals.
    If True: only show observed values for categorical groupers.
    If False: show all values for categorical groupers.

    .. deprecated:: 2.1.0

        The default value will change to True in a future version of pandas.

dropna : bool, default True
    If True, and if group keys contain NA values, NA values together
    with row/column will be dropped.
    If False, NA values will also be treated as the key in groups.

Returns
-------
pandas.api.typing.%(klass)sGroupBy
    Returns a groupby object that contains information about the groups.

See Also
--------
resample : Convenience method for frequency conversion and resampling
    of time series.

Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`__ for more
detailed usage and examples, including splitting an object into groups,
iterating through groups, selecting a group, aggregation, and more.
"""

# Shared docstring for pd.melt / DataFrame.melt
# (uses %-style substitution: %(other)s, %(caller)s).
_shared_docs[
    "melt"
] = """
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.

This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.

Parameters
----------
id_vars : scalar, tuple, list, or ndarray, optional
    Column(s) to use as identifier variables.
value_vars : scalar, tuple, list, or ndarray, optional
    Column(s) to unpivot. If not specified, uses all columns that
    are not set as `id_vars`.
var_name : scalar, default None
    Name to use for the 'variable' column. If None it uses
    ``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
    Name to use for the 'value' column, can't be an existing column label.
col_level : scalar, optional
    If columns are a MultiIndex then use this level to melt.
ignore_index : bool, default True
    If True, original index is ignored. If False, the original index is retained.
    Index labels will be repeated as necessary.

Returns
-------
DataFrame
    Unpivoted DataFrame.

See Also
--------
%(other)s : Identical method.
pivot_table : Create a spreadsheet-style pivot table as a DataFrame.
DataFrame.pivot : Return reshaped DataFrame organized
    by given index / column values.
DataFrame.explode : Explode a DataFrame from list-like
    columns to long format.

Notes
-----
Reference :ref:`the user guide <reshaping.melt>` for more examples.

Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
...                    'B': {0: 1, 1: 3, 2: 5},
...                    'C': {0: 2, 1: 4, 2: 6}})
>>> df
   A  B  C
0  a  1  2
1  b  3  4
2  c  5  6

>>> %(caller)sid_vars=['A'], value_vars=['B'])
   A variable  value
0  a        B      1
1  b        B      3
2  c        B      5

>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
   A variable  value
0  a        B      1
1  b        B      3
2  c        B      5
3  a        C      2
4  b        C      4
5  c        C      6

The names of 'variable' and 'value' columns can be customized:

>>> %(caller)sid_vars=['A'], value_vars=['B'],
...           var_name='myVarname', value_name='myValname')
   A myVarname  myValname
0  a         B          1
1  b         B          3
2  c         B          5

Original index values can be kept around:

>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False)
   A variable  value
0  a        B      1
1  b        B      3
2  c        B      5
0  a        C      2
1  b        C      4
2  c        C      6

If you have multi-index columns:

>>> df.columns = [list('ABC'), list('DEF')]
>>> df
   A  B  C
   D  E  F
0  a  1  2
1  b  3  4
2  c  5  6

>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
   A variable  value
0  a        B      1
1  b        B      3
2  c        B      5

>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
  (A, D) variable_0 variable_1  value
0      a          B          E      1
1      b          B          E      3
2      c          B          E      5
"""
|
| 313 |
+
|
| 314 |
+
_shared_docs[
|
| 315 |
+
"transform"
|
| 316 |
+
] = """
|
| 317 |
+
Call ``func`` on self producing a {klass} with the same axis shape as self.
|
| 318 |
+
|
| 319 |
+
Parameters
|
| 320 |
+
----------
|
| 321 |
+
func : function, str, list-like or dict-like
|
| 322 |
+
Function to use for transforming the data. If a function, must either
|
| 323 |
+
work when passed a {klass} or when passed to {klass}.apply. If func
|
| 324 |
+
is both list-like and dict-like, dict-like behavior takes precedence.
|
| 325 |
+
|
| 326 |
+
Accepted combinations are:
|
| 327 |
+
|
| 328 |
+
- function
|
| 329 |
+
- string function name
|
| 330 |
+
- list-like of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
|
| 331 |
+
- dict-like of axis labels -> functions, function names or list-like of such.
|
| 332 |
+
{axis}
|
| 333 |
+
*args
|
| 334 |
+
Positional arguments to pass to `func`.
|
| 335 |
+
**kwargs
|
| 336 |
+
Keyword arguments to pass to `func`.
|
| 337 |
+
|
| 338 |
+
Returns
|
| 339 |
+
-------
|
| 340 |
+
{klass}
|
| 341 |
+
A {klass} that must have the same length as self.
|
| 342 |
+
|
| 343 |
+
Raises
|
| 344 |
+
------
|
| 345 |
+
ValueError : If the returned {klass} has a different length than self.
|
| 346 |
+
|
| 347 |
+
See Also
|
| 348 |
+
--------
|
| 349 |
+
{klass}.agg : Only perform aggregating type operations.
|
| 350 |
+
{klass}.apply : Invoke function on a {klass}.
|
| 351 |
+
|
| 352 |
+
Notes
|
| 353 |
+
-----
|
| 354 |
+
Functions that mutate the passed object can produce unexpected
|
| 355 |
+
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
|
| 356 |
+
for more details.
|
| 357 |
+
|
| 358 |
+
Examples
|
| 359 |
+
--------
|
| 360 |
+
>>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}})
|
| 361 |
+
>>> df
|
| 362 |
+
A B
|
| 363 |
+
0 0 1
|
| 364 |
+
1 1 2
|
| 365 |
+
2 2 3
|
| 366 |
+
>>> df.transform(lambda x: x + 1)
|
| 367 |
+
A B
|
| 368 |
+
0 1 2
|
| 369 |
+
1 2 3
|
| 370 |
+
2 3 4
|
| 371 |
+
|
| 372 |
+
Even though the resulting {klass} must have the same length as the
|
| 373 |
+
input {klass}, it is possible to provide several input functions:
|
| 374 |
+
|
| 375 |
+
>>> s = pd.Series(range(3))
|
| 376 |
+
>>> s
|
| 377 |
+
0 0
|
| 378 |
+
1 1
|
| 379 |
+
2 2
|
| 380 |
+
dtype: int64
|
| 381 |
+
>>> s.transform([np.sqrt, np.exp])
|
| 382 |
+
sqrt exp
|
| 383 |
+
0 0.000000 1.000000
|
| 384 |
+
1 1.000000 2.718282
|
| 385 |
+
2 1.414214 7.389056
|
| 386 |
+
|
| 387 |
+
You can call transform on a GroupBy object:
|
| 388 |
+
|
| 389 |
+
>>> df = pd.DataFrame({{
|
| 390 |
+
... "Date": [
|
| 391 |
+
... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05",
|
| 392 |
+
... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05"],
|
| 393 |
+
... "Data": [5, 8, 6, 1, 50, 100, 60, 120],
|
| 394 |
+
... }})
|
| 395 |
+
>>> df
|
| 396 |
+
Date Data
|
| 397 |
+
0 2015-05-08 5
|
| 398 |
+
1 2015-05-07 8
|
| 399 |
+
2 2015-05-06 6
|
| 400 |
+
3 2015-05-05 1
|
| 401 |
+
4 2015-05-08 50
|
| 402 |
+
5 2015-05-07 100
|
| 403 |
+
6 2015-05-06 60
|
| 404 |
+
7 2015-05-05 120
|
| 405 |
+
>>> df.groupby('Date')['Data'].transform('sum')
|
| 406 |
+
0 55
|
| 407 |
+
1 108
|
| 408 |
+
2 66
|
| 409 |
+
3 121
|
| 410 |
+
4 55
|
| 411 |
+
5 108
|
| 412 |
+
6 66
|
| 413 |
+
7 121
|
| 414 |
+
Name: Data, dtype: int64
|
| 415 |
+
|
| 416 |
+
>>> df = pd.DataFrame({{
|
| 417 |
+
... "c": [1, 1, 1, 2, 2, 2, 2],
|
| 418 |
+
... "type": ["m", "n", "o", "m", "m", "n", "n"]
|
| 419 |
+
... }})
|
| 420 |
+
>>> df
|
| 421 |
+
c type
|
| 422 |
+
0 1 m
|
| 423 |
+
1 1 n
|
| 424 |
+
2 1 o
|
| 425 |
+
3 2 m
|
| 426 |
+
4 2 m
|
| 427 |
+
5 2 n
|
| 428 |
+
6 2 n
|
| 429 |
+
>>> df['size'] = df.groupby('c')['type'].transform(len)
|
| 430 |
+
>>> df
|
| 431 |
+
c type size
|
| 432 |
+
0 1 m 3
|
| 433 |
+
1 1 n 3
|
| 434 |
+
2 1 o 3
|
| 435 |
+
3 2 m 4
|
| 436 |
+
4 2 m 4
|
| 437 |
+
5 2 n 4
|
| 438 |
+
6 2 n 4
|
| 439 |
+
"""
|
| 440 |
+
|
| 441 |
+
_shared_docs[
|
| 442 |
+
"storage_options"
|
| 443 |
+
] = """storage_options : dict, optional
|
| 444 |
+
Extra options that make sense for a particular storage connection, e.g.
|
| 445 |
+
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
|
| 446 |
+
are forwarded to ``urllib.request.Request`` as header options. For other
|
| 447 |
+
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
|
| 448 |
+
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
|
| 449 |
+
details, and for more examples on storage options refer `here
|
| 450 |
+
<https://pandas.pydata.org/docs/user_guide/io.html?
|
| 451 |
+
highlight=storage_options#reading-writing-remote-files>`_."""
|
| 452 |
+
|
| 453 |
+
_shared_docs[
|
| 454 |
+
"compression_options"
|
| 455 |
+
] = """compression : str or dict, default 'infer'
|
| 456 |
+
For on-the-fly compression of the output data. If 'infer' and '%s' is
|
| 457 |
+
path-like, then detect compression from the following extensions: '.gz',
|
| 458 |
+
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
|
| 459 |
+
(otherwise no compression).
|
| 460 |
+
Set to ``None`` for no compression.
|
| 461 |
+
Can also be a dict with key ``'method'`` set
|
| 462 |
+
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
|
| 463 |
+
other key-value pairs are forwarded to
|
| 464 |
+
``zipfile.ZipFile``, ``gzip.GzipFile``,
|
| 465 |
+
``bz2.BZ2File``, ``zstandard.ZstdCompressor``, ``lzma.LZMAFile`` or
|
| 466 |
+
``tarfile.TarFile``, respectively.
|
| 467 |
+
As an example, the following could be passed for faster compression and to create
|
| 468 |
+
a reproducible gzip archive:
|
| 469 |
+
``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``.
|
| 470 |
+
|
| 471 |
+
.. versionadded:: 1.5.0
|
| 472 |
+
Added support for `.tar` files."""
|
| 473 |
+
|
| 474 |
+
_shared_docs[
|
| 475 |
+
"decompression_options"
|
| 476 |
+
] = """compression : str or dict, default 'infer'
|
| 477 |
+
For on-the-fly decompression of on-disk data. If 'infer' and '%s' is
|
| 478 |
+
path-like, then detect compression from the following extensions: '.gz',
|
| 479 |
+
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
|
| 480 |
+
(otherwise no compression).
|
| 481 |
+
If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in.
|
| 482 |
+
Set to ``None`` for no decompression.
|
| 483 |
+
Can also be a dict with key ``'method'`` set
|
| 484 |
+
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
|
| 485 |
+
other key-value pairs are forwarded to
|
| 486 |
+
``zipfile.ZipFile``, ``gzip.GzipFile``,
|
| 487 |
+
``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or
|
| 488 |
+
``tarfile.TarFile``, respectively.
|
| 489 |
+
As an example, the following could be passed for Zstandard decompression using a
|
| 490 |
+
custom compression dictionary:
|
| 491 |
+
``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.
|
| 492 |
+
|
| 493 |
+
.. versionadded:: 1.5.0
|
| 494 |
+
Added support for `.tar` files."""
|
| 495 |
+
|
| 496 |
+
_shared_docs[
|
| 497 |
+
"replace"
|
| 498 |
+
] = """
|
| 499 |
+
Replace values given in `to_replace` with `value`.
|
| 500 |
+
|
| 501 |
+
Values of the {klass} are replaced with other values dynamically.
|
| 502 |
+
This differs from updating with ``.loc`` or ``.iloc``, which require
|
| 503 |
+
you to specify a location to update with some value.
|
| 504 |
+
|
| 505 |
+
Parameters
|
| 506 |
+
----------
|
| 507 |
+
to_replace : str, regex, list, dict, Series, int, float, or None
|
| 508 |
+
How to find the values that will be replaced.
|
| 509 |
+
|
| 510 |
+
* numeric, str or regex:
|
| 511 |
+
|
| 512 |
+
- numeric: numeric values equal to `to_replace` will be
|
| 513 |
+
replaced with `value`
|
| 514 |
+
- str: string exactly matching `to_replace` will be replaced
|
| 515 |
+
with `value`
|
| 516 |
+
- regex: regexs matching `to_replace` will be replaced with
|
| 517 |
+
`value`
|
| 518 |
+
|
| 519 |
+
* list of str, regex, or numeric:
|
| 520 |
+
|
| 521 |
+
- First, if `to_replace` and `value` are both lists, they
|
| 522 |
+
**must** be the same length.
|
| 523 |
+
- Second, if ``regex=True`` then all of the strings in **both**
|
| 524 |
+
lists will be interpreted as regexs otherwise they will match
|
| 525 |
+
directly. This doesn't matter much for `value` since there
|
| 526 |
+
are only a few possible substitution regexes you can use.
|
| 527 |
+
- str, regex and numeric rules apply as above.
|
| 528 |
+
|
| 529 |
+
* dict:
|
| 530 |
+
|
| 531 |
+
- Dicts can be used to specify different replacement values
|
| 532 |
+
for different existing values. For example,
|
| 533 |
+
``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and
|
| 534 |
+
'y' with 'z'. To use a dict in this way, the optional `value`
|
| 535 |
+
parameter should not be given.
|
| 536 |
+
- For a DataFrame a dict can specify that different values
|
| 537 |
+
should be replaced in different columns. For example,
|
| 538 |
+
``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'
|
| 539 |
+
and the value 'z' in column 'b' and replaces these values
|
| 540 |
+
with whatever is specified in `value`. The `value` parameter
|
| 541 |
+
should not be ``None`` in this case. You can treat this as a
|
| 542 |
+
special case of passing two lists except that you are
|
| 543 |
+
specifying the column to search in.
|
| 544 |
+
- For a DataFrame nested dictionaries, e.g.,
|
| 545 |
+
``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column
|
| 546 |
+
'a' for the value 'b' and replace it with NaN. The optional `value`
|
| 547 |
+
parameter should not be specified to use a nested dict in this
|
| 548 |
+
way. You can nest regular expressions as well. Note that
|
| 549 |
+
column names (the top-level dictionary keys in a nested
|
| 550 |
+
dictionary) **cannot** be regular expressions.
|
| 551 |
+
|
| 552 |
+
* None:
|
| 553 |
+
|
| 554 |
+
- This means that the `regex` argument must be a string,
|
| 555 |
+
compiled regular expression, or list, dict, ndarray or
|
| 556 |
+
Series of such elements. If `value` is also ``None`` then
|
| 557 |
+
this **must** be a nested dictionary or Series.
|
| 558 |
+
|
| 559 |
+
See the examples section for examples of each of these.
|
| 560 |
+
value : scalar, dict, list, str, regex, default None
|
| 561 |
+
Value to replace any values matching `to_replace` with.
|
| 562 |
+
For a DataFrame a dict of values can be used to specify which
|
| 563 |
+
value to use for each column (columns not in the dict will not be
|
| 564 |
+
filled). Regular expressions, strings and lists or dicts of such
|
| 565 |
+
objects are also allowed.
|
| 566 |
+
{inplace}
|
| 567 |
+
limit : int, default None
|
| 568 |
+
Maximum size gap to forward or backward fill.
|
| 569 |
+
|
| 570 |
+
.. deprecated:: 2.1.0
|
| 571 |
+
regex : bool or same types as `to_replace`, default False
|
| 572 |
+
Whether to interpret `to_replace` and/or `value` as regular
|
| 573 |
+
expressions. Alternatively, this could be a regular expression or a
|
| 574 |
+
list, dict, or array of regular expressions in which case
|
| 575 |
+
`to_replace` must be ``None``.
|
| 576 |
+
method : {{'pad', 'ffill', 'bfill'}}
|
| 577 |
+
The method to use when for replacement, when `to_replace` is a
|
| 578 |
+
scalar, list or tuple and `value` is ``None``.
|
| 579 |
+
|
| 580 |
+
.. deprecated:: 2.1.0
|
| 581 |
+
|
| 582 |
+
Returns
|
| 583 |
+
-------
|
| 584 |
+
{klass}
|
| 585 |
+
Object after replacement.
|
| 586 |
+
|
| 587 |
+
Raises
|
| 588 |
+
------
|
| 589 |
+
AssertionError
|
| 590 |
+
* If `regex` is not a ``bool`` and `to_replace` is not
|
| 591 |
+
``None``.
|
| 592 |
+
|
| 593 |
+
TypeError
|
| 594 |
+
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
|
| 595 |
+
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
|
| 596 |
+
``dict``, ``ndarray``, or ``Series``
|
| 597 |
+
* If `to_replace` is ``None`` and `regex` is not compilable
|
| 598 |
+
into a regular expression or is a list, dict, ndarray, or
|
| 599 |
+
Series.
|
| 600 |
+
* When replacing multiple ``bool`` or ``datetime64`` objects and
|
| 601 |
+
the arguments to `to_replace` does not match the type of the
|
| 602 |
+
value being replaced
|
| 603 |
+
|
| 604 |
+
ValueError
|
| 605 |
+
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
|
| 606 |
+
`value` but they are not the same length.
|
| 607 |
+
|
| 608 |
+
See Also
|
| 609 |
+
--------
|
| 610 |
+
Series.fillna : Fill NA values.
|
| 611 |
+
DataFrame.fillna : Fill NA values.
|
| 612 |
+
Series.where : Replace values based on boolean condition.
|
| 613 |
+
DataFrame.where : Replace values based on boolean condition.
|
| 614 |
+
DataFrame.map: Apply a function to a Dataframe elementwise.
|
| 615 |
+
Series.map: Map values of Series according to an input mapping or function.
|
| 616 |
+
Series.str.replace : Simple string replacement.
|
| 617 |
+
|
| 618 |
+
Notes
|
| 619 |
+
-----
|
| 620 |
+
* Regex substitution is performed under the hood with ``re.sub``. The
|
| 621 |
+
rules for substitution for ``re.sub`` are the same.
|
| 622 |
+
* Regular expressions will only substitute on strings, meaning you
|
| 623 |
+
cannot provide, for example, a regular expression matching floating
|
| 624 |
+
point numbers and expect the columns in your frame that have a
|
| 625 |
+
numeric dtype to be matched. However, if those floating point
|
| 626 |
+
numbers *are* strings, then you can do this.
|
| 627 |
+
* This method has *a lot* of options. You are encouraged to experiment
|
| 628 |
+
and play with this method to gain intuition about how it works.
|
| 629 |
+
* When dict is used as the `to_replace` value, it is like
|
| 630 |
+
key(s) in the dict are the to_replace part and
|
| 631 |
+
value(s) in the dict are the value parameter.
|
| 632 |
+
|
| 633 |
+
Examples
|
| 634 |
+
--------
|
| 635 |
+
|
| 636 |
+
**Scalar `to_replace` and `value`**
|
| 637 |
+
|
| 638 |
+
>>> s = pd.Series([1, 2, 3, 4, 5])
|
| 639 |
+
>>> s.replace(1, 5)
|
| 640 |
+
0 5
|
| 641 |
+
1 2
|
| 642 |
+
2 3
|
| 643 |
+
3 4
|
| 644 |
+
4 5
|
| 645 |
+
dtype: int64
|
| 646 |
+
|
| 647 |
+
>>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4],
|
| 648 |
+
... 'B': [5, 6, 7, 8, 9],
|
| 649 |
+
... 'C': ['a', 'b', 'c', 'd', 'e']}})
|
| 650 |
+
>>> df.replace(0, 5)
|
| 651 |
+
A B C
|
| 652 |
+
0 5 5 a
|
| 653 |
+
1 1 6 b
|
| 654 |
+
2 2 7 c
|
| 655 |
+
3 3 8 d
|
| 656 |
+
4 4 9 e
|
| 657 |
+
|
| 658 |
+
**List-like `to_replace`**
|
| 659 |
+
|
| 660 |
+
>>> df.replace([0, 1, 2, 3], 4)
|
| 661 |
+
A B C
|
| 662 |
+
0 4 5 a
|
| 663 |
+
1 4 6 b
|
| 664 |
+
2 4 7 c
|
| 665 |
+
3 4 8 d
|
| 666 |
+
4 4 9 e
|
| 667 |
+
|
| 668 |
+
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
|
| 669 |
+
A B C
|
| 670 |
+
0 4 5 a
|
| 671 |
+
1 3 6 b
|
| 672 |
+
2 2 7 c
|
| 673 |
+
3 1 8 d
|
| 674 |
+
4 4 9 e
|
| 675 |
+
|
| 676 |
+
>>> s.replace([1, 2], method='bfill')
|
| 677 |
+
0 3
|
| 678 |
+
1 3
|
| 679 |
+
2 3
|
| 680 |
+
3 4
|
| 681 |
+
4 5
|
| 682 |
+
dtype: int64
|
| 683 |
+
|
| 684 |
+
**dict-like `to_replace`**
|
| 685 |
+
|
| 686 |
+
>>> df.replace({{0: 10, 1: 100}})
|
| 687 |
+
A B C
|
| 688 |
+
0 10 5 a
|
| 689 |
+
1 100 6 b
|
| 690 |
+
2 2 7 c
|
| 691 |
+
3 3 8 d
|
| 692 |
+
4 4 9 e
|
| 693 |
+
|
| 694 |
+
>>> df.replace({{'A': 0, 'B': 5}}, 100)
|
| 695 |
+
A B C
|
| 696 |
+
0 100 100 a
|
| 697 |
+
1 1 6 b
|
| 698 |
+
2 2 7 c
|
| 699 |
+
3 3 8 d
|
| 700 |
+
4 4 9 e
|
| 701 |
+
|
| 702 |
+
>>> df.replace({{'A': {{0: 100, 4: 400}}}})
|
| 703 |
+
A B C
|
| 704 |
+
0 100 5 a
|
| 705 |
+
1 1 6 b
|
| 706 |
+
2 2 7 c
|
| 707 |
+
3 3 8 d
|
| 708 |
+
4 400 9 e
|
| 709 |
+
|
| 710 |
+
**Regular expression `to_replace`**
|
| 711 |
+
|
| 712 |
+
>>> df = pd.DataFrame({{'A': ['bat', 'foo', 'bait'],
|
| 713 |
+
... 'B': ['abc', 'bar', 'xyz']}})
|
| 714 |
+
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
|
| 715 |
+
A B
|
| 716 |
+
0 new abc
|
| 717 |
+
1 foo new
|
| 718 |
+
2 bait xyz
|
| 719 |
+
|
| 720 |
+
>>> df.replace({{'A': r'^ba.$'}}, {{'A': 'new'}}, regex=True)
|
| 721 |
+
A B
|
| 722 |
+
0 new abc
|
| 723 |
+
1 foo bar
|
| 724 |
+
2 bait xyz
|
| 725 |
+
|
| 726 |
+
>>> df.replace(regex=r'^ba.$', value='new')
|
| 727 |
+
A B
|
| 728 |
+
0 new abc
|
| 729 |
+
1 foo new
|
| 730 |
+
2 bait xyz
|
| 731 |
+
|
| 732 |
+
>>> df.replace(regex={{r'^ba.$': 'new', 'foo': 'xyz'}})
|
| 733 |
+
A B
|
| 734 |
+
0 new abc
|
| 735 |
+
1 xyz new
|
| 736 |
+
2 bait xyz
|
| 737 |
+
|
| 738 |
+
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
|
| 739 |
+
A B
|
| 740 |
+
0 new abc
|
| 741 |
+
1 new new
|
| 742 |
+
2 bait xyz
|
| 743 |
+
|
| 744 |
+
Compare the behavior of ``s.replace({{'a': None}})`` and
|
| 745 |
+
``s.replace('a', None)`` to understand the peculiarities
|
| 746 |
+
of the `to_replace` parameter:
|
| 747 |
+
|
| 748 |
+
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
|
| 749 |
+
|
| 750 |
+
When one uses a dict as the `to_replace` value, it is like the
|
| 751 |
+
value(s) in the dict are equal to the `value` parameter.
|
| 752 |
+
``s.replace({{'a': None}})`` is equivalent to
|
| 753 |
+
``s.replace(to_replace={{'a': None}}, value=None, method=None)``:
|
| 754 |
+
|
| 755 |
+
>>> s.replace({{'a': None}})
|
| 756 |
+
0 10
|
| 757 |
+
1 None
|
| 758 |
+
2 None
|
| 759 |
+
3 b
|
| 760 |
+
4 None
|
| 761 |
+
dtype: object
|
| 762 |
+
|
| 763 |
+
When ``value`` is not explicitly passed and `to_replace` is a scalar, list
|
| 764 |
+
or tuple, `replace` uses the method parameter (default 'pad') to do the
|
| 765 |
+
replacement. So this is why the 'a' values are being replaced by 10
|
| 766 |
+
in rows 1 and 2 and 'b' in row 4 in this case.
|
| 767 |
+
|
| 768 |
+
>>> s.replace('a')
|
| 769 |
+
0 10
|
| 770 |
+
1 10
|
| 771 |
+
2 10
|
| 772 |
+
3 b
|
| 773 |
+
4 b
|
| 774 |
+
dtype: object
|
| 775 |
+
|
| 776 |
+
.. deprecated:: 2.1.0
|
| 777 |
+
The 'method' parameter and padding behavior are deprecated.
|
| 778 |
+
|
| 779 |
+
On the other hand, if ``None`` is explicitly passed for ``value``, it will
|
| 780 |
+
be respected:
|
| 781 |
+
|
| 782 |
+
>>> s.replace('a', None)
|
| 783 |
+
0 10
|
| 784 |
+
1 None
|
| 785 |
+
2 None
|
| 786 |
+
3 b
|
| 787 |
+
4 None
|
| 788 |
+
dtype: object
|
| 789 |
+
|
| 790 |
+
.. versionchanged:: 1.4.0
|
| 791 |
+
Previously the explicit ``None`` was silently ignored.
|
| 792 |
+
|
| 793 |
+
When ``regex=True``, ``value`` is not ``None`` and `to_replace` is a string,
|
| 794 |
+
the replacement will be applied in all columns of the DataFrame.
|
| 795 |
+
|
| 796 |
+
>>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4],
|
| 797 |
+
... 'B': ['a', 'b', 'c', 'd', 'e'],
|
| 798 |
+
... 'C': ['f', 'g', 'h', 'i', 'j']}})
|
| 799 |
+
|
| 800 |
+
>>> df.replace(to_replace='^[a-g]', value='e', regex=True)
|
| 801 |
+
A B C
|
| 802 |
+
0 0 e e
|
| 803 |
+
1 1 e e
|
| 804 |
+
2 2 e h
|
| 805 |
+
3 3 e i
|
| 806 |
+
4 4 e j
|
| 807 |
+
|
| 808 |
+
If ``value`` is not ``None`` and `to_replace` is a dictionary, the dictionary
|
| 809 |
+
keys will be the DataFrame columns that the replacement will be applied.
|
| 810 |
+
|
| 811 |
+
>>> df.replace(to_replace={{'B': '^[a-c]', 'C': '^[h-j]'}}, value='e', regex=True)
|
| 812 |
+
A B C
|
| 813 |
+
0 0 e f
|
| 814 |
+
1 1 e g
|
| 815 |
+
2 2 e e
|
| 816 |
+
3 3 d e
|
| 817 |
+
4 4 e e
|
| 818 |
+
"""
|
| 819 |
+
|
| 820 |
+
_shared_docs[
|
| 821 |
+
"idxmin"
|
| 822 |
+
] = """
|
| 823 |
+
Return index of first occurrence of minimum over requested axis.
|
| 824 |
+
|
| 825 |
+
NA/null values are excluded.
|
| 826 |
+
|
| 827 |
+
Parameters
|
| 828 |
+
----------
|
| 829 |
+
axis : {{0 or 'index', 1 or 'columns'}}, default 0
|
| 830 |
+
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
|
| 831 |
+
skipna : bool, default True
|
| 832 |
+
Exclude NA/null values. If an entire row/column is NA, the result
|
| 833 |
+
will be NA.
|
| 834 |
+
numeric_only : bool, default {numeric_only_default}
|
| 835 |
+
Include only `float`, `int` or `boolean` data.
|
| 836 |
+
|
| 837 |
+
.. versionadded:: 1.5.0
|
| 838 |
+
|
| 839 |
+
Returns
|
| 840 |
+
-------
|
| 841 |
+
Series
|
| 842 |
+
Indexes of minima along the specified axis.
|
| 843 |
+
|
| 844 |
+
Raises
|
| 845 |
+
------
|
| 846 |
+
ValueError
|
| 847 |
+
* If the row/column is empty
|
| 848 |
+
|
| 849 |
+
See Also
|
| 850 |
+
--------
|
| 851 |
+
Series.idxmin : Return index of the minimum element.
|
| 852 |
+
|
| 853 |
+
Notes
|
| 854 |
+
-----
|
| 855 |
+
This method is the DataFrame version of ``ndarray.argmin``.
|
| 856 |
+
|
| 857 |
+
Examples
|
| 858 |
+
--------
|
| 859 |
+
Consider a dataset containing food consumption in Argentina.
|
| 860 |
+
|
| 861 |
+
>>> df = pd.DataFrame({{'consumption': [10.51, 103.11, 55.48],
|
| 862 |
+
... 'co2_emissions': [37.2, 19.66, 1712]}},
|
| 863 |
+
... index=['Pork', 'Wheat Products', 'Beef'])
|
| 864 |
+
|
| 865 |
+
>>> df
|
| 866 |
+
consumption co2_emissions
|
| 867 |
+
Pork 10.51 37.20
|
| 868 |
+
Wheat Products 103.11 19.66
|
| 869 |
+
Beef 55.48 1712.00
|
| 870 |
+
|
| 871 |
+
By default, it returns the index for the minimum value in each column.
|
| 872 |
+
|
| 873 |
+
>>> df.idxmin()
|
| 874 |
+
consumption Pork
|
| 875 |
+
co2_emissions Wheat Products
|
| 876 |
+
dtype: object
|
| 877 |
+
|
| 878 |
+
To return the index for the minimum value in each row, use ``axis="columns"``.
|
| 879 |
+
|
| 880 |
+
>>> df.idxmin(axis="columns")
|
| 881 |
+
Pork consumption
|
| 882 |
+
Wheat Products co2_emissions
|
| 883 |
+
Beef consumption
|
| 884 |
+
dtype: object
|
| 885 |
+
"""
|
| 886 |
+
|
| 887 |
+
_shared_docs[
|
| 888 |
+
"idxmax"
|
| 889 |
+
] = """
|
| 890 |
+
Return index of first occurrence of maximum over requested axis.
|
| 891 |
+
|
| 892 |
+
NA/null values are excluded.
|
| 893 |
+
|
| 894 |
+
Parameters
|
| 895 |
+
----------
|
| 896 |
+
axis : {{0 or 'index', 1 or 'columns'}}, default 0
|
| 897 |
+
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
|
| 898 |
+
skipna : bool, default True
|
| 899 |
+
Exclude NA/null values. If an entire row/column is NA, the result
|
| 900 |
+
will be NA.
|
| 901 |
+
numeric_only : bool, default {numeric_only_default}
|
| 902 |
+
Include only `float`, `int` or `boolean` data.
|
| 903 |
+
|
| 904 |
+
.. versionadded:: 1.5.0
|
| 905 |
+
|
| 906 |
+
Returns
|
| 907 |
+
-------
|
| 908 |
+
Series
|
| 909 |
+
Indexes of maxima along the specified axis.
|
| 910 |
+
|
| 911 |
+
Raises
|
| 912 |
+
------
|
| 913 |
+
ValueError
|
| 914 |
+
* If the row/column is empty
|
| 915 |
+
|
| 916 |
+
See Also
|
| 917 |
+
--------
|
| 918 |
+
Series.idxmax : Return index of the maximum element.
|
| 919 |
+
|
| 920 |
+
Notes
|
| 921 |
+
-----
|
| 922 |
+
This method is the DataFrame version of ``ndarray.argmax``.
|
| 923 |
+
|
| 924 |
+
Examples
|
| 925 |
+
--------
|
| 926 |
+
Consider a dataset containing food consumption in Argentina.
|
| 927 |
+
|
| 928 |
+
>>> df = pd.DataFrame({{'consumption': [10.51, 103.11, 55.48],
|
| 929 |
+
... 'co2_emissions': [37.2, 19.66, 1712]}},
|
| 930 |
+
... index=['Pork', 'Wheat Products', 'Beef'])
|
| 931 |
+
|
| 932 |
+
>>> df
|
| 933 |
+
consumption co2_emissions
|
| 934 |
+
Pork 10.51 37.20
|
| 935 |
+
Wheat Products 103.11 19.66
|
| 936 |
+
Beef 55.48 1712.00
|
| 937 |
+
|
| 938 |
+
By default, it returns the index for the maximum value in each column.
|
| 939 |
+
|
| 940 |
+
>>> df.idxmax()
|
| 941 |
+
consumption Wheat Products
|
| 942 |
+
co2_emissions Beef
|
| 943 |
+
dtype: object
|
| 944 |
+
|
| 945 |
+
To return the index for the maximum value in each row, use ``axis="columns"``.
|
| 946 |
+
|
| 947 |
+
>>> df.idxmax(axis="columns")
|
| 948 |
+
Pork co2_emissions
|
| 949 |
+
Wheat Products consumption
|
| 950 |
+
Beef co2_emissions
|
| 951 |
+
dtype: object
|
| 952 |
+
"""
|
videollama2/lib/python3.10/site-packages/pandas/core/sorting.py
ADDED
|
@@ -0,0 +1,748 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" miscellaneous sorting / groupby utilities """
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from typing import (
|
| 6 |
+
TYPE_CHECKING,
|
| 7 |
+
Callable,
|
| 8 |
+
DefaultDict,
|
| 9 |
+
cast,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from pandas._libs import (
|
| 15 |
+
algos,
|
| 16 |
+
hashtable,
|
| 17 |
+
lib,
|
| 18 |
+
)
|
| 19 |
+
from pandas._libs.hashtable import unique_label_indices
|
| 20 |
+
|
| 21 |
+
from pandas.core.dtypes.common import (
|
| 22 |
+
ensure_int64,
|
| 23 |
+
ensure_platform_int,
|
| 24 |
+
)
|
| 25 |
+
from pandas.core.dtypes.generic import (
|
| 26 |
+
ABCMultiIndex,
|
| 27 |
+
ABCRangeIndex,
|
| 28 |
+
)
|
| 29 |
+
from pandas.core.dtypes.missing import isna
|
| 30 |
+
|
| 31 |
+
from pandas.core.construction import extract_array
|
| 32 |
+
|
| 33 |
+
if TYPE_CHECKING:
|
| 34 |
+
from collections.abc import (
|
| 35 |
+
Hashable,
|
| 36 |
+
Iterable,
|
| 37 |
+
Sequence,
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
from pandas._typing import (
|
| 41 |
+
ArrayLike,
|
| 42 |
+
AxisInt,
|
| 43 |
+
IndexKeyFunc,
|
| 44 |
+
Level,
|
| 45 |
+
NaPosition,
|
| 46 |
+
Shape,
|
| 47 |
+
SortKind,
|
| 48 |
+
npt,
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
from pandas import (
|
| 52 |
+
MultiIndex,
|
| 53 |
+
Series,
|
| 54 |
+
)
|
| 55 |
+
from pandas.core.arrays import ExtensionArray
|
| 56 |
+
from pandas.core.indexes.base import Index
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def get_indexer_indexer(
|
| 60 |
+
target: Index,
|
| 61 |
+
level: Level | list[Level] | None,
|
| 62 |
+
ascending: list[bool] | bool,
|
| 63 |
+
kind: SortKind,
|
| 64 |
+
na_position: NaPosition,
|
| 65 |
+
sort_remaining: bool,
|
| 66 |
+
key: IndexKeyFunc,
|
| 67 |
+
) -> npt.NDArray[np.intp] | None:
|
| 68 |
+
"""
|
| 69 |
+
Helper method that return the indexer according to input parameters for
|
| 70 |
+
the sort_index method of DataFrame and Series.
|
| 71 |
+
|
| 72 |
+
Parameters
|
| 73 |
+
----------
|
| 74 |
+
target : Index
|
| 75 |
+
level : int or level name or list of ints or list of level names
|
| 76 |
+
ascending : bool or list of bools, default True
|
| 77 |
+
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}
|
| 78 |
+
na_position : {'first', 'last'}
|
| 79 |
+
sort_remaining : bool
|
| 80 |
+
key : callable, optional
|
| 81 |
+
|
| 82 |
+
Returns
|
| 83 |
+
-------
|
| 84 |
+
Optional[ndarray[intp]]
|
| 85 |
+
The indexer for the new index.
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
# error: Incompatible types in assignment (expression has type
|
| 89 |
+
# "Union[ExtensionArray, ndarray[Any, Any], Index, Series]", variable has
|
| 90 |
+
# type "Index")
|
| 91 |
+
target = ensure_key_mapped(target, key, levels=level) # type: ignore[assignment]
|
| 92 |
+
target = target._sort_levels_monotonic()
|
| 93 |
+
|
| 94 |
+
if level is not None:
|
| 95 |
+
_, indexer = target.sortlevel(
|
| 96 |
+
level,
|
| 97 |
+
ascending=ascending,
|
| 98 |
+
sort_remaining=sort_remaining,
|
| 99 |
+
na_position=na_position,
|
| 100 |
+
)
|
| 101 |
+
elif (np.all(ascending) and target.is_monotonic_increasing) or (
|
| 102 |
+
not np.any(ascending) and target.is_monotonic_decreasing
|
| 103 |
+
):
|
| 104 |
+
# Check monotonic-ness before sort an index (GH 11080)
|
| 105 |
+
return None
|
| 106 |
+
elif isinstance(target, ABCMultiIndex):
|
| 107 |
+
codes = [lev.codes for lev in target._get_codes_for_sorting()]
|
| 108 |
+
indexer = lexsort_indexer(
|
| 109 |
+
codes, orders=ascending, na_position=na_position, codes_given=True
|
| 110 |
+
)
|
| 111 |
+
else:
|
| 112 |
+
# ascending can only be a Sequence for MultiIndex
|
| 113 |
+
indexer = nargsort(
|
| 114 |
+
target,
|
| 115 |
+
kind=kind,
|
| 116 |
+
ascending=cast(bool, ascending),
|
| 117 |
+
na_position=na_position,
|
| 118 |
+
)
|
| 119 |
+
return indexer
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def get_group_index(
|
| 123 |
+
labels, shape: Shape, sort: bool, xnull: bool
|
| 124 |
+
) -> npt.NDArray[np.int64]:
|
| 125 |
+
"""
|
| 126 |
+
For the particular label_list, gets the offsets into the hypothetical list
|
| 127 |
+
representing the totally ordered cartesian product of all possible label
|
| 128 |
+
combinations, *as long as* this space fits within int64 bounds;
|
| 129 |
+
otherwise, though group indices identify unique combinations of
|
| 130 |
+
labels, they cannot be deconstructed.
|
| 131 |
+
- If `sort`, rank of returned ids preserve lexical ranks of labels.
|
| 132 |
+
i.e. returned id's can be used to do lexical sort on labels;
|
| 133 |
+
- If `xnull` nulls (-1 labels) are passed through.
|
| 134 |
+
|
| 135 |
+
Parameters
|
| 136 |
+
----------
|
| 137 |
+
labels : sequence of arrays
|
| 138 |
+
Integers identifying levels at each location
|
| 139 |
+
shape : tuple[int, ...]
|
| 140 |
+
Number of unique levels at each location
|
| 141 |
+
sort : bool
|
| 142 |
+
If the ranks of returned ids should match lexical ranks of labels
|
| 143 |
+
xnull : bool
|
| 144 |
+
If true nulls are excluded. i.e. -1 values in the labels are
|
| 145 |
+
passed through.
|
| 146 |
+
|
| 147 |
+
Returns
|
| 148 |
+
-------
|
| 149 |
+
An array of type int64 where two elements are equal if their corresponding
|
| 150 |
+
labels are equal at all location.
|
| 151 |
+
|
| 152 |
+
Notes
|
| 153 |
+
-----
|
| 154 |
+
The length of `labels` and `shape` must be identical.
|
| 155 |
+
"""
|
| 156 |
+
|
| 157 |
+
def _int64_cut_off(shape) -> int:
|
| 158 |
+
acc = 1
|
| 159 |
+
for i, mul in enumerate(shape):
|
| 160 |
+
acc *= int(mul)
|
| 161 |
+
if not acc < lib.i8max:
|
| 162 |
+
return i
|
| 163 |
+
return len(shape)
|
| 164 |
+
|
| 165 |
+
def maybe_lift(lab, size: int) -> tuple[np.ndarray, int]:
|
| 166 |
+
# promote nan values (assigned -1 label in lab array)
|
| 167 |
+
# so that all output values are non-negative
|
| 168 |
+
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
|
| 169 |
+
|
| 170 |
+
labels = [ensure_int64(x) for x in labels]
|
| 171 |
+
lshape = list(shape)
|
| 172 |
+
if not xnull:
|
| 173 |
+
for i, (lab, size) in enumerate(zip(labels, shape)):
|
| 174 |
+
labels[i], lshape[i] = maybe_lift(lab, size)
|
| 175 |
+
|
| 176 |
+
labels = list(labels)
|
| 177 |
+
|
| 178 |
+
# Iteratively process all the labels in chunks sized so less
|
| 179 |
+
# than lib.i8max unique int ids will be required for each chunk
|
| 180 |
+
while True:
|
| 181 |
+
# how many levels can be done without overflow:
|
| 182 |
+
nlev = _int64_cut_off(lshape)
|
| 183 |
+
|
| 184 |
+
# compute flat ids for the first `nlev` levels
|
| 185 |
+
stride = np.prod(lshape[1:nlev], dtype="i8")
|
| 186 |
+
out = stride * labels[0].astype("i8", subok=False, copy=False)
|
| 187 |
+
|
| 188 |
+
for i in range(1, nlev):
|
| 189 |
+
if lshape[i] == 0:
|
| 190 |
+
stride = np.int64(0)
|
| 191 |
+
else:
|
| 192 |
+
stride //= lshape[i]
|
| 193 |
+
out += labels[i] * stride
|
| 194 |
+
|
| 195 |
+
if xnull: # exclude nulls
|
| 196 |
+
mask = labels[0] == -1
|
| 197 |
+
for lab in labels[1:nlev]:
|
| 198 |
+
mask |= lab == -1
|
| 199 |
+
out[mask] = -1
|
| 200 |
+
|
| 201 |
+
if nlev == len(lshape): # all levels done!
|
| 202 |
+
break
|
| 203 |
+
|
| 204 |
+
# compress what has been done so far in order to avoid overflow
|
| 205 |
+
# to retain lexical ranks, obs_ids should be sorted
|
| 206 |
+
comp_ids, obs_ids = compress_group_index(out, sort=sort)
|
| 207 |
+
|
| 208 |
+
labels = [comp_ids] + labels[nlev:]
|
| 209 |
+
lshape = [len(obs_ids)] + lshape[nlev:]
|
| 210 |
+
|
| 211 |
+
return out
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def get_compressed_ids(
|
| 215 |
+
labels, sizes: Shape
|
| 216 |
+
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]:
|
| 217 |
+
"""
|
| 218 |
+
Group_index is offsets into cartesian product of all possible labels. This
|
| 219 |
+
space can be huge, so this function compresses it, by computing offsets
|
| 220 |
+
(comp_ids) into the list of unique labels (obs_group_ids).
|
| 221 |
+
|
| 222 |
+
Parameters
|
| 223 |
+
----------
|
| 224 |
+
labels : list of label arrays
|
| 225 |
+
sizes : tuple[int] of size of the levels
|
| 226 |
+
|
| 227 |
+
Returns
|
| 228 |
+
-------
|
| 229 |
+
np.ndarray[np.intp]
|
| 230 |
+
comp_ids
|
| 231 |
+
np.ndarray[np.int64]
|
| 232 |
+
obs_group_ids
|
| 233 |
+
"""
|
| 234 |
+
ids = get_group_index(labels, sizes, sort=True, xnull=False)
|
| 235 |
+
return compress_group_index(ids, sort=True)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def is_int64_overflow_possible(shape: Shape) -> bool:
|
| 239 |
+
the_prod = 1
|
| 240 |
+
for x in shape:
|
| 241 |
+
the_prod *= int(x)
|
| 242 |
+
|
| 243 |
+
return the_prod >= lib.i8max
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def _decons_group_index(
|
| 247 |
+
comp_labels: npt.NDArray[np.intp], shape: Shape
|
| 248 |
+
) -> list[npt.NDArray[np.intp]]:
|
| 249 |
+
# reconstruct labels
|
| 250 |
+
if is_int64_overflow_possible(shape):
|
| 251 |
+
# at some point group indices are factorized,
|
| 252 |
+
# and may not be deconstructed here! wrong path!
|
| 253 |
+
raise ValueError("cannot deconstruct factorized group indices!")
|
| 254 |
+
|
| 255 |
+
label_list = []
|
| 256 |
+
factor = 1
|
| 257 |
+
y = np.array(0)
|
| 258 |
+
x = comp_labels
|
| 259 |
+
for i in reversed(range(len(shape))):
|
| 260 |
+
labels = (x - y) % (factor * shape[i]) // factor
|
| 261 |
+
np.putmask(labels, comp_labels < 0, -1)
|
| 262 |
+
label_list.append(labels)
|
| 263 |
+
y = labels * factor
|
| 264 |
+
factor *= shape[i]
|
| 265 |
+
return label_list[::-1]
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def decons_obs_group_ids(
|
| 269 |
+
comp_ids: npt.NDArray[np.intp],
|
| 270 |
+
obs_ids: npt.NDArray[np.intp],
|
| 271 |
+
shape: Shape,
|
| 272 |
+
labels: Sequence[npt.NDArray[np.signedinteger]],
|
| 273 |
+
xnull: bool,
|
| 274 |
+
) -> list[npt.NDArray[np.intp]]:
|
| 275 |
+
"""
|
| 276 |
+
Reconstruct labels from observed group ids.
|
| 277 |
+
|
| 278 |
+
Parameters
|
| 279 |
+
----------
|
| 280 |
+
comp_ids : np.ndarray[np.intp]
|
| 281 |
+
obs_ids: np.ndarray[np.intp]
|
| 282 |
+
shape : tuple[int]
|
| 283 |
+
labels : Sequence[np.ndarray[np.signedinteger]]
|
| 284 |
+
xnull : bool
|
| 285 |
+
If nulls are excluded; i.e. -1 labels are passed through.
|
| 286 |
+
"""
|
| 287 |
+
if not xnull:
|
| 288 |
+
lift = np.fromiter(((a == -1).any() for a in labels), dtype=np.intp)
|
| 289 |
+
arr_shape = np.asarray(shape, dtype=np.intp) + lift
|
| 290 |
+
shape = tuple(arr_shape)
|
| 291 |
+
|
| 292 |
+
if not is_int64_overflow_possible(shape):
|
| 293 |
+
# obs ids are deconstructable! take the fast route!
|
| 294 |
+
out = _decons_group_index(obs_ids, shape)
|
| 295 |
+
return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]
|
| 296 |
+
|
| 297 |
+
indexer = unique_label_indices(comp_ids)
|
| 298 |
+
return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels]
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def lexsort_indexer(
|
| 302 |
+
keys: Sequence[ArrayLike | Index | Series],
|
| 303 |
+
orders=None,
|
| 304 |
+
na_position: str = "last",
|
| 305 |
+
key: Callable | None = None,
|
| 306 |
+
codes_given: bool = False,
|
| 307 |
+
) -> npt.NDArray[np.intp]:
|
| 308 |
+
"""
|
| 309 |
+
Performs lexical sorting on a set of keys
|
| 310 |
+
|
| 311 |
+
Parameters
|
| 312 |
+
----------
|
| 313 |
+
keys : Sequence[ArrayLike | Index | Series]
|
| 314 |
+
Sequence of arrays to be sorted by the indexer
|
| 315 |
+
Sequence[Series] is only if key is not None.
|
| 316 |
+
orders : bool or list of booleans, optional
|
| 317 |
+
Determines the sorting order for each element in keys. If a list,
|
| 318 |
+
it must be the same length as keys. This determines whether the
|
| 319 |
+
corresponding element in keys should be sorted in ascending
|
| 320 |
+
(True) or descending (False) order. if bool, applied to all
|
| 321 |
+
elements as above. if None, defaults to True.
|
| 322 |
+
na_position : {'first', 'last'}, default 'last'
|
| 323 |
+
Determines placement of NA elements in the sorted list ("last" or "first")
|
| 324 |
+
key : Callable, optional
|
| 325 |
+
Callable key function applied to every element in keys before sorting
|
| 326 |
+
codes_given: bool, False
|
| 327 |
+
Avoid categorical materialization if codes are already provided.
|
| 328 |
+
|
| 329 |
+
Returns
|
| 330 |
+
-------
|
| 331 |
+
np.ndarray[np.intp]
|
| 332 |
+
"""
|
| 333 |
+
from pandas.core.arrays import Categorical
|
| 334 |
+
|
| 335 |
+
if na_position not in ["last", "first"]:
|
| 336 |
+
raise ValueError(f"invalid na_position: {na_position}")
|
| 337 |
+
|
| 338 |
+
if isinstance(orders, bool):
|
| 339 |
+
orders = [orders] * len(keys)
|
| 340 |
+
elif orders is None:
|
| 341 |
+
orders = [True] * len(keys)
|
| 342 |
+
|
| 343 |
+
labels = []
|
| 344 |
+
|
| 345 |
+
for k, order in zip(keys, orders):
|
| 346 |
+
k = ensure_key_mapped(k, key)
|
| 347 |
+
if codes_given:
|
| 348 |
+
codes = cast(np.ndarray, k)
|
| 349 |
+
n = codes.max() + 1 if len(codes) else 0
|
| 350 |
+
else:
|
| 351 |
+
cat = Categorical(k, ordered=True)
|
| 352 |
+
codes = cat.codes
|
| 353 |
+
n = len(cat.categories)
|
| 354 |
+
|
| 355 |
+
mask = codes == -1
|
| 356 |
+
|
| 357 |
+
if na_position == "last" and mask.any():
|
| 358 |
+
codes = np.where(mask, n, codes)
|
| 359 |
+
|
| 360 |
+
# not order means descending
|
| 361 |
+
if not order:
|
| 362 |
+
codes = np.where(mask, codes, n - codes - 1)
|
| 363 |
+
|
| 364 |
+
labels.append(codes)
|
| 365 |
+
|
| 366 |
+
return np.lexsort(labels[::-1])
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def nargsort(
|
| 370 |
+
items: ArrayLike | Index | Series,
|
| 371 |
+
kind: SortKind = "quicksort",
|
| 372 |
+
ascending: bool = True,
|
| 373 |
+
na_position: str = "last",
|
| 374 |
+
key: Callable | None = None,
|
| 375 |
+
mask: npt.NDArray[np.bool_] | None = None,
|
| 376 |
+
) -> npt.NDArray[np.intp]:
|
| 377 |
+
"""
|
| 378 |
+
Intended to be a drop-in replacement for np.argsort which handles NaNs.
|
| 379 |
+
|
| 380 |
+
Adds ascending, na_position, and key parameters.
|
| 381 |
+
|
| 382 |
+
(GH #6399, #5231, #27237)
|
| 383 |
+
|
| 384 |
+
Parameters
|
| 385 |
+
----------
|
| 386 |
+
items : np.ndarray, ExtensionArray, Index, or Series
|
| 387 |
+
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
|
| 388 |
+
ascending : bool, default True
|
| 389 |
+
na_position : {'first', 'last'}, default 'last'
|
| 390 |
+
key : Optional[Callable], default None
|
| 391 |
+
mask : Optional[np.ndarray[bool]], default None
|
| 392 |
+
Passed when called by ExtensionArray.argsort.
|
| 393 |
+
|
| 394 |
+
Returns
|
| 395 |
+
-------
|
| 396 |
+
np.ndarray[np.intp]
|
| 397 |
+
"""
|
| 398 |
+
|
| 399 |
+
if key is not None:
|
| 400 |
+
# see TestDataFrameSortKey, TestRangeIndex::test_sort_values_key
|
| 401 |
+
items = ensure_key_mapped(items, key)
|
| 402 |
+
return nargsort(
|
| 403 |
+
items,
|
| 404 |
+
kind=kind,
|
| 405 |
+
ascending=ascending,
|
| 406 |
+
na_position=na_position,
|
| 407 |
+
key=None,
|
| 408 |
+
mask=mask,
|
| 409 |
+
)
|
| 410 |
+
|
| 411 |
+
if isinstance(items, ABCRangeIndex):
|
| 412 |
+
return items.argsort(ascending=ascending)
|
| 413 |
+
elif not isinstance(items, ABCMultiIndex):
|
| 414 |
+
items = extract_array(items)
|
| 415 |
+
else:
|
| 416 |
+
raise TypeError(
|
| 417 |
+
"nargsort does not support MultiIndex. Use index.sort_values instead."
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
if mask is None:
|
| 421 |
+
mask = np.asarray(isna(items))
|
| 422 |
+
|
| 423 |
+
if not isinstance(items, np.ndarray):
|
| 424 |
+
# i.e. ExtensionArray
|
| 425 |
+
return items.argsort(
|
| 426 |
+
ascending=ascending,
|
| 427 |
+
kind=kind,
|
| 428 |
+
na_position=na_position,
|
| 429 |
+
)
|
| 430 |
+
|
| 431 |
+
idx = np.arange(len(items))
|
| 432 |
+
non_nans = items[~mask]
|
| 433 |
+
non_nan_idx = idx[~mask]
|
| 434 |
+
|
| 435 |
+
nan_idx = np.nonzero(mask)[0]
|
| 436 |
+
if not ascending:
|
| 437 |
+
non_nans = non_nans[::-1]
|
| 438 |
+
non_nan_idx = non_nan_idx[::-1]
|
| 439 |
+
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
|
| 440 |
+
if not ascending:
|
| 441 |
+
indexer = indexer[::-1]
|
| 442 |
+
# Finally, place the NaNs at the end or the beginning according to
|
| 443 |
+
# na_position
|
| 444 |
+
if na_position == "last":
|
| 445 |
+
indexer = np.concatenate([indexer, nan_idx])
|
| 446 |
+
elif na_position == "first":
|
| 447 |
+
indexer = np.concatenate([nan_idx, indexer])
|
| 448 |
+
else:
|
| 449 |
+
raise ValueError(f"invalid na_position: {na_position}")
|
| 450 |
+
return ensure_platform_int(indexer)
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0):
|
| 454 |
+
"""
|
| 455 |
+
Implementation of np.argmin/argmax but for ExtensionArray and which
|
| 456 |
+
handles missing values.
|
| 457 |
+
|
| 458 |
+
Parameters
|
| 459 |
+
----------
|
| 460 |
+
values : ExtensionArray
|
| 461 |
+
method : {"argmax", "argmin"}
|
| 462 |
+
axis : int, default 0
|
| 463 |
+
|
| 464 |
+
Returns
|
| 465 |
+
-------
|
| 466 |
+
int
|
| 467 |
+
"""
|
| 468 |
+
assert method in {"argmax", "argmin"}
|
| 469 |
+
func = np.argmax if method == "argmax" else np.argmin
|
| 470 |
+
|
| 471 |
+
mask = np.asarray(isna(values))
|
| 472 |
+
arr_values = values._values_for_argsort()
|
| 473 |
+
|
| 474 |
+
if arr_values.ndim > 1:
|
| 475 |
+
if mask.any():
|
| 476 |
+
if axis == 1:
|
| 477 |
+
zipped = zip(arr_values, mask)
|
| 478 |
+
else:
|
| 479 |
+
zipped = zip(arr_values.T, mask.T)
|
| 480 |
+
return np.array([_nanargminmax(v, m, func) for v, m in zipped])
|
| 481 |
+
return func(arr_values, axis=axis)
|
| 482 |
+
|
| 483 |
+
return _nanargminmax(arr_values, mask, func)
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
def _nanargminmax(values: np.ndarray, mask: npt.NDArray[np.bool_], func) -> int:
|
| 487 |
+
"""
|
| 488 |
+
See nanargminmax.__doc__.
|
| 489 |
+
"""
|
| 490 |
+
idx = np.arange(values.shape[0])
|
| 491 |
+
non_nans = values[~mask]
|
| 492 |
+
non_nan_idx = idx[~mask]
|
| 493 |
+
|
| 494 |
+
return non_nan_idx[func(non_nans)]
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def _ensure_key_mapped_multiindex(
|
| 498 |
+
index: MultiIndex, key: Callable, level=None
|
| 499 |
+
) -> MultiIndex:
|
| 500 |
+
"""
|
| 501 |
+
Returns a new MultiIndex in which key has been applied
|
| 502 |
+
to all levels specified in level (or all levels if level
|
| 503 |
+
is None). Used for key sorting for MultiIndex.
|
| 504 |
+
|
| 505 |
+
Parameters
|
| 506 |
+
----------
|
| 507 |
+
index : MultiIndex
|
| 508 |
+
Index to which to apply the key function on the
|
| 509 |
+
specified levels.
|
| 510 |
+
key : Callable
|
| 511 |
+
Function that takes an Index and returns an Index of
|
| 512 |
+
the same shape. This key is applied to each level
|
| 513 |
+
separately. The name of the level can be used to
|
| 514 |
+
distinguish different levels for application.
|
| 515 |
+
level : list-like, int or str, default None
|
| 516 |
+
Level or list of levels to apply the key function to.
|
| 517 |
+
If None, key function is applied to all levels. Other
|
| 518 |
+
levels are left unchanged.
|
| 519 |
+
|
| 520 |
+
Returns
|
| 521 |
+
-------
|
| 522 |
+
labels : MultiIndex
|
| 523 |
+
Resulting MultiIndex with modified levels.
|
| 524 |
+
"""
|
| 525 |
+
|
| 526 |
+
if level is not None:
|
| 527 |
+
if isinstance(level, (str, int)):
|
| 528 |
+
sort_levels = [level]
|
| 529 |
+
else:
|
| 530 |
+
sort_levels = level
|
| 531 |
+
|
| 532 |
+
sort_levels = [index._get_level_number(lev) for lev in sort_levels]
|
| 533 |
+
else:
|
| 534 |
+
sort_levels = list(range(index.nlevels)) # satisfies mypy
|
| 535 |
+
|
| 536 |
+
mapped = [
|
| 537 |
+
ensure_key_mapped(index._get_level_values(level), key)
|
| 538 |
+
if level in sort_levels
|
| 539 |
+
else index._get_level_values(level)
|
| 540 |
+
for level in range(index.nlevels)
|
| 541 |
+
]
|
| 542 |
+
|
| 543 |
+
return type(index).from_arrays(mapped)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def ensure_key_mapped(
|
| 547 |
+
values: ArrayLike | Index | Series, key: Callable | None, levels=None
|
| 548 |
+
) -> ArrayLike | Index | Series:
|
| 549 |
+
"""
|
| 550 |
+
Applies a callable key function to the values function and checks
|
| 551 |
+
that the resulting value has the same shape. Can be called on Index
|
| 552 |
+
subclasses, Series, DataFrames, or ndarrays.
|
| 553 |
+
|
| 554 |
+
Parameters
|
| 555 |
+
----------
|
| 556 |
+
values : Series, DataFrame, Index subclass, or ndarray
|
| 557 |
+
key : Optional[Callable], key to be called on the values array
|
| 558 |
+
levels : Optional[List], if values is a MultiIndex, list of levels to
|
| 559 |
+
apply the key to.
|
| 560 |
+
"""
|
| 561 |
+
from pandas.core.indexes.api import Index
|
| 562 |
+
|
| 563 |
+
if not key:
|
| 564 |
+
return values
|
| 565 |
+
|
| 566 |
+
if isinstance(values, ABCMultiIndex):
|
| 567 |
+
return _ensure_key_mapped_multiindex(values, key, level=levels)
|
| 568 |
+
|
| 569 |
+
result = key(values.copy())
|
| 570 |
+
if len(result) != len(values):
|
| 571 |
+
raise ValueError(
|
| 572 |
+
"User-provided `key` function must not change the shape of the array."
|
| 573 |
+
)
|
| 574 |
+
|
| 575 |
+
try:
|
| 576 |
+
if isinstance(
|
| 577 |
+
values, Index
|
| 578 |
+
): # convert to a new Index subclass, not necessarily the same
|
| 579 |
+
result = Index(result)
|
| 580 |
+
else:
|
| 581 |
+
# try to revert to original type otherwise
|
| 582 |
+
type_of_values = type(values)
|
| 583 |
+
# error: Too many arguments for "ExtensionArray"
|
| 584 |
+
result = type_of_values(result) # type: ignore[call-arg]
|
| 585 |
+
except TypeError:
|
| 586 |
+
raise TypeError(
|
| 587 |
+
f"User-provided `key` function returned an invalid type {type(result)} \
|
| 588 |
+
which could not be converted to {type(values)}."
|
| 589 |
+
)
|
| 590 |
+
|
| 591 |
+
return result
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
def get_flattened_list(
|
| 595 |
+
comp_ids: npt.NDArray[np.intp],
|
| 596 |
+
ngroups: int,
|
| 597 |
+
levels: Iterable[Index],
|
| 598 |
+
labels: Iterable[np.ndarray],
|
| 599 |
+
) -> list[tuple]:
|
| 600 |
+
"""Map compressed group id -> key tuple."""
|
| 601 |
+
comp_ids = comp_ids.astype(np.int64, copy=False)
|
| 602 |
+
arrays: DefaultDict[int, list[int]] = defaultdict(list)
|
| 603 |
+
for labs, level in zip(labels, levels):
|
| 604 |
+
table = hashtable.Int64HashTable(ngroups)
|
| 605 |
+
table.map_keys_to_values(comp_ids, labs.astype(np.int64, copy=False))
|
| 606 |
+
for i in range(ngroups):
|
| 607 |
+
arrays[i].append(level[table.get_item(i)])
|
| 608 |
+
return [tuple(array) for array in arrays.values()]
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
def get_indexer_dict(
|
| 612 |
+
label_list: list[np.ndarray], keys: list[Index]
|
| 613 |
+
) -> dict[Hashable, npt.NDArray[np.intp]]:
|
| 614 |
+
"""
|
| 615 |
+
Returns
|
| 616 |
+
-------
|
| 617 |
+
dict:
|
| 618 |
+
Labels mapped to indexers.
|
| 619 |
+
"""
|
| 620 |
+
shape = tuple(len(x) for x in keys)
|
| 621 |
+
|
| 622 |
+
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
|
| 623 |
+
if np.all(group_index == -1):
|
| 624 |
+
# Short-circuit, lib.indices_fast will return the same
|
| 625 |
+
return {}
|
| 626 |
+
ngroups = (
|
| 627 |
+
((group_index.size and group_index.max()) + 1)
|
| 628 |
+
if is_int64_overflow_possible(shape)
|
| 629 |
+
else np.prod(shape, dtype="i8")
|
| 630 |
+
)
|
| 631 |
+
|
| 632 |
+
sorter = get_group_index_sorter(group_index, ngroups)
|
| 633 |
+
|
| 634 |
+
sorted_labels = [lab.take(sorter) for lab in label_list]
|
| 635 |
+
group_index = group_index.take(sorter)
|
| 636 |
+
|
| 637 |
+
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
# ----------------------------------------------------------------------
|
| 641 |
+
# sorting levels...cleverly?
|
| 642 |
+
|
| 643 |
+
|
| 644 |
+
def get_group_index_sorter(
|
| 645 |
+
group_index: npt.NDArray[np.intp], ngroups: int | None = None
|
| 646 |
+
) -> npt.NDArray[np.intp]:
|
| 647 |
+
"""
|
| 648 |
+
algos.groupsort_indexer implements `counting sort` and it is at least
|
| 649 |
+
O(ngroups), where
|
| 650 |
+
ngroups = prod(shape)
|
| 651 |
+
shape = map(len, keys)
|
| 652 |
+
that is, linear in the number of combinations (cartesian product) of unique
|
| 653 |
+
values of groupby keys. This can be huge when doing multi-key groupby.
|
| 654 |
+
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
|
| 655 |
+
length of the data-frame;
|
| 656 |
+
Both algorithms are `stable` sort and that is necessary for correctness of
|
| 657 |
+
groupby operations. e.g. consider:
|
| 658 |
+
df.groupby(key)[col].transform('first')
|
| 659 |
+
|
| 660 |
+
Parameters
|
| 661 |
+
----------
|
| 662 |
+
group_index : np.ndarray[np.intp]
|
| 663 |
+
signed integer dtype
|
| 664 |
+
ngroups : int or None, default None
|
| 665 |
+
|
| 666 |
+
Returns
|
| 667 |
+
-------
|
| 668 |
+
np.ndarray[np.intp]
|
| 669 |
+
"""
|
| 670 |
+
if ngroups is None:
|
| 671 |
+
ngroups = 1 + group_index.max()
|
| 672 |
+
count = len(group_index)
|
| 673 |
+
alpha = 0.0 # taking complexities literally; there may be
|
| 674 |
+
beta = 1.0 # some room for fine-tuning these parameters
|
| 675 |
+
do_groupsort = count > 0 and ((alpha + beta * ngroups) < (count * np.log(count)))
|
| 676 |
+
if do_groupsort:
|
| 677 |
+
sorter, _ = algos.groupsort_indexer(
|
| 678 |
+
ensure_platform_int(group_index),
|
| 679 |
+
ngroups,
|
| 680 |
+
)
|
| 681 |
+
# sorter _should_ already be intp, but mypy is not yet able to verify
|
| 682 |
+
else:
|
| 683 |
+
sorter = group_index.argsort(kind="mergesort")
|
| 684 |
+
return ensure_platform_int(sorter)
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
def compress_group_index(
|
| 688 |
+
group_index: npt.NDArray[np.int64], sort: bool = True
|
| 689 |
+
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]:
|
| 690 |
+
"""
|
| 691 |
+
Group_index is offsets into cartesian product of all possible labels. This
|
| 692 |
+
space can be huge, so this function compresses it, by computing offsets
|
| 693 |
+
(comp_ids) into the list of unique labels (obs_group_ids).
|
| 694 |
+
"""
|
| 695 |
+
if len(group_index) and np.all(group_index[1:] >= group_index[:-1]):
|
| 696 |
+
# GH 53806: fast path for sorted group_index
|
| 697 |
+
unique_mask = np.concatenate(
|
| 698 |
+
[group_index[:1] > -1, group_index[1:] != group_index[:-1]]
|
| 699 |
+
)
|
| 700 |
+
comp_ids = unique_mask.cumsum()
|
| 701 |
+
comp_ids -= 1
|
| 702 |
+
obs_group_ids = group_index[unique_mask]
|
| 703 |
+
else:
|
| 704 |
+
size_hint = len(group_index)
|
| 705 |
+
table = hashtable.Int64HashTable(size_hint)
|
| 706 |
+
|
| 707 |
+
group_index = ensure_int64(group_index)
|
| 708 |
+
|
| 709 |
+
# note, group labels come out ascending (ie, 1,2,3 etc)
|
| 710 |
+
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
|
| 711 |
+
|
| 712 |
+
if sort and len(obs_group_ids) > 0:
|
| 713 |
+
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
|
| 714 |
+
|
| 715 |
+
return ensure_int64(comp_ids), ensure_int64(obs_group_ids)
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
def _reorder_by_uniques(
|
| 719 |
+
uniques: npt.NDArray[np.int64], labels: npt.NDArray[np.intp]
|
| 720 |
+
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.intp]]:
|
| 721 |
+
"""
|
| 722 |
+
Parameters
|
| 723 |
+
----------
|
| 724 |
+
uniques : np.ndarray[np.int64]
|
| 725 |
+
labels : np.ndarray[np.intp]
|
| 726 |
+
|
| 727 |
+
Returns
|
| 728 |
+
-------
|
| 729 |
+
np.ndarray[np.int64]
|
| 730 |
+
np.ndarray[np.intp]
|
| 731 |
+
"""
|
| 732 |
+
# sorter is index where elements ought to go
|
| 733 |
+
sorter = uniques.argsort()
|
| 734 |
+
|
| 735 |
+
# reverse_indexer is where elements came from
|
| 736 |
+
reverse_indexer = np.empty(len(sorter), dtype=np.intp)
|
| 737 |
+
reverse_indexer.put(sorter, np.arange(len(sorter)))
|
| 738 |
+
|
| 739 |
+
mask = labels < 0
|
| 740 |
+
|
| 741 |
+
# move labels to right locations (ie, unsort ascending labels)
|
| 742 |
+
labels = reverse_indexer.take(labels)
|
| 743 |
+
np.putmask(labels, mask, -1)
|
| 744 |
+
|
| 745 |
+
# sort observed ids
|
| 746 |
+
uniques = uniques.take(sorter)
|
| 747 |
+
|
| 748 |
+
return uniques, labels
|
videollama2/lib/python3.10/site-packages/pandas/errors/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (28.9 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/tseries/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ruff: noqa: TCH004
|
| 2 |
+
from typing import TYPE_CHECKING
|
| 3 |
+
|
| 4 |
+
if TYPE_CHECKING:
|
| 5 |
+
# import modules that have public classes/functions:
|
| 6 |
+
from pandas.tseries import (
|
| 7 |
+
frequencies,
|
| 8 |
+
offsets,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
# and mark only those modules as public
|
| 12 |
+
__all__ = ["frequencies", "offsets"]
|
videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (324 Bytes). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (415 Bytes). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/frequencies.cpython-310.pyc
ADDED
|
Binary file (14.3 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/holiday.cpython-310.pyc
ADDED
|
Binary file (16.3 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/tseries/__pycache__/offsets.cpython-310.pyc
ADDED
|
Binary file (1.37 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/tseries/api.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Timeseries API
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from pandas._libs.tslibs.parsing import guess_datetime_format
|
| 6 |
+
|
| 7 |
+
from pandas.tseries import offsets
|
| 8 |
+
from pandas.tseries.frequencies import infer_freq
|
| 9 |
+
|
| 10 |
+
__all__ = ["infer_freq", "offsets", "guess_datetime_format"]
|
videollama2/lib/python3.10/site-packages/pandas/tseries/frequencies.py
ADDED
|
@@ -0,0 +1,602 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from pandas._libs import lib
|
| 8 |
+
from pandas._libs.algos import unique_deltas
|
| 9 |
+
from pandas._libs.tslibs import (
|
| 10 |
+
Timestamp,
|
| 11 |
+
get_unit_from_dtype,
|
| 12 |
+
periods_per_day,
|
| 13 |
+
tz_convert_from_utc,
|
| 14 |
+
)
|
| 15 |
+
from pandas._libs.tslibs.ccalendar import (
|
| 16 |
+
DAYS,
|
| 17 |
+
MONTH_ALIASES,
|
| 18 |
+
MONTH_NUMBERS,
|
| 19 |
+
MONTHS,
|
| 20 |
+
int_to_weekday,
|
| 21 |
+
)
|
| 22 |
+
from pandas._libs.tslibs.dtypes import (
|
| 23 |
+
OFFSET_TO_PERIOD_FREQSTR,
|
| 24 |
+
freq_to_period_freqstr,
|
| 25 |
+
)
|
| 26 |
+
from pandas._libs.tslibs.fields import (
|
| 27 |
+
build_field_sarray,
|
| 28 |
+
month_position_check,
|
| 29 |
+
)
|
| 30 |
+
from pandas._libs.tslibs.offsets import (
|
| 31 |
+
DateOffset,
|
| 32 |
+
Day,
|
| 33 |
+
to_offset,
|
| 34 |
+
)
|
| 35 |
+
from pandas._libs.tslibs.parsing import get_rule_month
|
| 36 |
+
from pandas.util._decorators import cache_readonly
|
| 37 |
+
|
| 38 |
+
from pandas.core.dtypes.common import is_numeric_dtype
|
| 39 |
+
from pandas.core.dtypes.dtypes import (
|
| 40 |
+
DatetimeTZDtype,
|
| 41 |
+
PeriodDtype,
|
| 42 |
+
)
|
| 43 |
+
from pandas.core.dtypes.generic import (
|
| 44 |
+
ABCIndex,
|
| 45 |
+
ABCSeries,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
from pandas.core.algorithms import unique
|
| 49 |
+
|
| 50 |
+
if TYPE_CHECKING:
|
| 51 |
+
from pandas._typing import npt
|
| 52 |
+
|
| 53 |
+
from pandas import (
|
| 54 |
+
DatetimeIndex,
|
| 55 |
+
Series,
|
| 56 |
+
TimedeltaIndex,
|
| 57 |
+
)
|
| 58 |
+
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
|
| 59 |
+
# --------------------------------------------------------------------
|
| 60 |
+
# Offset related functions
|
| 61 |
+
|
| 62 |
+
_need_suffix = ["QS", "BQE", "BQS", "YS", "BYE", "BYS"]
|
| 63 |
+
|
| 64 |
+
for _prefix in _need_suffix:
|
| 65 |
+
for _m in MONTHS:
|
| 66 |
+
key = f"{_prefix}-{_m}"
|
| 67 |
+
OFFSET_TO_PERIOD_FREQSTR[key] = OFFSET_TO_PERIOD_FREQSTR[_prefix]
|
| 68 |
+
|
| 69 |
+
for _prefix in ["Y", "Q"]:
|
| 70 |
+
for _m in MONTHS:
|
| 71 |
+
_alias = f"{_prefix}-{_m}"
|
| 72 |
+
OFFSET_TO_PERIOD_FREQSTR[_alias] = _alias
|
| 73 |
+
|
| 74 |
+
for _d in DAYS:
|
| 75 |
+
OFFSET_TO_PERIOD_FREQSTR[f"W-{_d}"] = f"W-{_d}"
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def get_period_alias(offset_str: str) -> str | None:
|
| 79 |
+
"""
|
| 80 |
+
Alias to closest period strings BQ->Q etc.
|
| 81 |
+
"""
|
| 82 |
+
return OFFSET_TO_PERIOD_FREQSTR.get(offset_str, None)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# ---------------------------------------------------------------------
|
| 86 |
+
# Period codes
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def infer_freq(
    index: DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin,
) -> str | None:
    """
    Infer the most likely frequency given the input index.

    Parameters
    ----------
    index : DatetimeIndex, TimedeltaIndex, Series or array-like
        If passed a Series will use the values of the series (NOT THE INDEX).

    Returns
    -------
    str or None
        None if no discernible frequency.

    Raises
    ------
    TypeError
        If the index is not datetime-like.
    ValueError
        If there are fewer than three values.

    Examples
    --------
    >>> idx = pd.date_range(start='2020/12/01', end='2020/12/30', periods=30)
    >>> pd.infer_freq(idx)
    'D'
    """
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.api import DatetimeIndex

    if isinstance(index, ABCSeries):
        # Use the Series' values; only datetime64/timedelta64, tz-aware, or
        # object dtypes can plausibly be converted to datetimes.
        values = index._values
        if not (
            lib.is_np_dtype(values.dtype, "mM")
            or isinstance(values.dtype, DatetimeTZDtype)
            or values.dtype == object
        ):
            raise TypeError(
                "cannot infer freq from a non-convertible dtype "
                f"on a Series of {index.dtype}"
            )
        index = values

    inferer: _FrequencyInferer

    if not hasattr(index, "dtype"):
        # Plain array-like (e.g. list); coerced to DatetimeIndex below.
        pass
    elif isinstance(index.dtype, PeriodDtype):
        # A PeriodIndex already carries its frequency explicitly.
        raise TypeError(
            "PeriodIndex given. Check the `freq` attribute "
            "instead of using infer_freq."
        )
    elif lib.is_np_dtype(index.dtype, "m"):
        # Allow TimedeltaIndex and TimedeltaArray
        inferer = _TimedeltaFrequencyInferer(index)
        return inferer.get_freq()

    elif is_numeric_dtype(index.dtype):
        raise TypeError(
            f"cannot infer freq from a non-convertible index of dtype {index.dtype}"
        )

    # Everything remaining is treated as datetime-like.
    if not isinstance(index, DatetimeIndex):
        index = DatetimeIndex(index)

    inferer = _FrequencyInferer(index)
    return inferer.get_freq()
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class _FrequencyInferer:
    """
    Infer a frequency string from a datetime-like index.

    Not sure if I can avoid the state machine here
    """

    def __init__(self, index) -> None:
        self.index = index
        # Integer (epoch) view of the values, in the index's unit.
        self.i8values = index.asi8

        # For get_unit_from_dtype we need the dtype to the underlying ndarray,
        # which for tz-aware is not the same as index.dtype
        if isinstance(index, ABCIndex):
            # error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
            # ndarray[Any, Any]]" has no attribute "_ndarray"
            self._creso = get_unit_from_dtype(
                index._data._ndarray.dtype  # type: ignore[union-attr]
            )
        else:
            # otherwise we have DTA/TDA
            self._creso = get_unit_from_dtype(index._ndarray.dtype)

        # This moves the values, which are implicitly in UTC, to the
        # the timezone so they are in local time
        if hasattr(index, "tz"):
            if index.tz is not None:
                self.i8values = tz_convert_from_utc(
                    self.i8values, index.tz, reso=self._creso
                )

        if len(index) < 3:
            raise ValueError("Need at least 3 dates to infer frequency")

        self.is_monotonic = (
            self.index._is_monotonic_increasing or self.index._is_monotonic_decreasing
        )

    @cache_readonly
    def deltas(self) -> npt.NDArray[np.int64]:
        # Unique consecutive differences of the (possibly tz-localized) values.
        return unique_deltas(self.i8values)

    @cache_readonly
    def deltas_asi8(self) -> npt.NDArray[np.int64]:
        # NB: we cannot use self.i8values here because we may have converted
        # the tz in __init__
        return unique_deltas(self.index.asi8)

    @cache_readonly
    def is_unique(self) -> bool:
        # True when all consecutive differences are identical.
        return len(self.deltas) == 1

    @cache_readonly
    def is_unique_asi8(self) -> bool:
        # Same check, but on the raw UTC values (DST-safe).
        return len(self.deltas_asi8) == 1

    def get_freq(self) -> str | None:
        """
        Find the appropriate frequency string to describe the inferred
        frequency of self.i8values

        Returns
        -------
        str or None
        """
        if not self.is_monotonic or not self.index._is_unique:
            return None

        delta = self.deltas[0]
        ppd = periods_per_day(self._creso)
        if delta and _is_multiple(delta, ppd):
            # Spacing is a whole number of days: calendar-based rules apply.
            return self._infer_daily_rule()

        # Business hourly, maybe. 17: one day / 65: one weekend
        if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
            return "bh"

        # Possibly intraday frequency. Here we use the
        # original .asi8 values as the modified values
        # will not work around DST transitions. See #8772
        if not self.is_unique_asi8:
            return None

        delta = self.deltas_asi8[0]
        # Periods per hour / minute / second at this resolution.
        pph = ppd // 24
        ppm = pph // 60
        pps = ppm // 60
        if _is_multiple(delta, pph):
            # Hours
            return _maybe_add_count("h", delta / pph)
        elif _is_multiple(delta, ppm):
            # Minutes
            return _maybe_add_count("min", delta / ppm)
        elif _is_multiple(delta, pps):
            # Seconds
            return _maybe_add_count("s", delta / pps)
        elif _is_multiple(delta, (pps // 1000)):
            # Milliseconds
            return _maybe_add_count("ms", delta / (pps // 1000))
        elif _is_multiple(delta, (pps // 1_000_000)):
            # Microseconds
            return _maybe_add_count("us", delta / (pps // 1_000_000))
        else:
            # Nanoseconds
            return _maybe_add_count("ns", delta)

    @cache_readonly
    def day_deltas(self) -> list[int]:
        # Consecutive differences expressed in days.
        ppd = periods_per_day(self._creso)
        return [x / ppd for x in self.deltas]

    @cache_readonly
    def hour_deltas(self) -> list[int]:
        # Consecutive differences expressed in hours.
        pph = periods_per_day(self._creso) // 24
        return [x / pph for x in self.deltas]

    @cache_readonly
    def fields(self) -> np.ndarray:  # structured array of fields
        return build_field_sarray(self.i8values, reso=self._creso)

    @cache_readonly
    def rep_stamp(self) -> Timestamp:
        # Representative timestamp (first value) used for month/weekday anchors.
        return Timestamp(self.i8values[0], unit=self.index.unit)

    def month_position_check(self) -> str | None:
        # Returns "cs"/"bs"/"ce"/"be" for calendar/business month start/end,
        # or None when no consistent position exists.
        return month_position_check(self.fields, self.index.dayofweek)

    @cache_readonly
    def mdiffs(self) -> npt.NDArray[np.int64]:
        # Unique month-count differences between consecutive values.
        nmonths = self.fields["Y"] * 12 + self.fields["M"]
        return unique_deltas(nmonths.astype("i8"))

    @cache_readonly
    def ydiffs(self) -> npt.NDArray[np.int64]:
        # Unique year differences between consecutive values.
        return unique_deltas(self.fields["Y"].astype("i8"))

    def _infer_daily_rule(self) -> str | None:
        # Try progressively finer calendar rules: annual -> quarterly ->
        # monthly -> uniform daily/weekly -> business daily -> week-of-month.
        annual_rule = self._get_annual_rule()
        if annual_rule:
            nyears = self.ydiffs[0]
            month = MONTH_ALIASES[self.rep_stamp.month]
            alias = f"{annual_rule}-{month}"
            return _maybe_add_count(alias, nyears)

        quarterly_rule = self._get_quarterly_rule()
        if quarterly_rule:
            nquarters = self.mdiffs[0] / 3
            # Map month-mod-3 of the representative stamp to a quarter anchor.
            mod_dict = {0: 12, 2: 11, 1: 10}
            month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]]
            alias = f"{quarterly_rule}-{month}"
            return _maybe_add_count(alias, nquarters)

        monthly_rule = self._get_monthly_rule()
        if monthly_rule:
            return _maybe_add_count(monthly_rule, self.mdiffs[0])

        if self.is_unique:
            return self._get_daily_rule()

        if self._is_business_daily():
            return "B"

        wom_rule = self._get_wom_rule()
        if wom_rule:
            return wom_rule

        return None

    def _get_daily_rule(self) -> str | None:
        ppd = periods_per_day(self._creso)
        days = self.deltas[0] / ppd
        if days % 7 == 0:
            # Weekly
            wd = int_to_weekday[self.rep_stamp.weekday()]
            alias = f"W-{wd}"
            return _maybe_add_count(alias, days / 7)
        else:
            return _maybe_add_count("D", days)

    def _get_annual_rule(self) -> str | None:
        if len(self.ydiffs) > 1:
            return None

        if len(unique(self.fields["M"])) > 1:
            # Values fall in more than one calendar month: not annual.
            return None

        pos_check = self.month_position_check()

        if pos_check is None:
            return None
        else:
            return {"cs": "YS", "bs": "BYS", "ce": "YE", "be": "BYE"}.get(pos_check)

    def _get_quarterly_rule(self) -> str | None:
        if len(self.mdiffs) > 1:
            return None

        if not self.mdiffs[0] % 3 == 0:
            # Month spacing must be a multiple of 3 to be quarterly.
            return None

        pos_check = self.month_position_check()

        if pos_check is None:
            return None
        else:
            return {"cs": "QS", "bs": "BQS", "ce": "QE", "be": "BQE"}.get(pos_check)

    def _get_monthly_rule(self) -> str | None:
        if len(self.mdiffs) > 1:
            return None
        pos_check = self.month_position_check()

        if pos_check is None:
            return None
        else:
            return {"cs": "MS", "bs": "BMS", "ce": "ME", "be": "BME"}.get(pos_check)

    def _is_business_daily(self) -> bool:
        # quick check: cannot be business daily
        if self.day_deltas != [1, 3]:
            return False

        # probably business daily, but need to confirm
        first_weekday = self.index[0].weekday()
        shifts = np.diff(self.i8values)
        ppd = periods_per_day(self._creso)
        shifts = np.floor_divide(shifts, ppd)
        weekdays = np.mod(first_weekday + np.cumsum(shifts), 7)

        # Every 3-day gap must land on Monday (i.e. span a weekend) and every
        # 1-day step must land on Tue-Fri.
        return bool(
            np.all(
                ((weekdays == 0) & (shifts == 3))
                | ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))
            )
        )

    def _get_wom_rule(self) -> str | None:
        # Week-of-month rule (e.g. "WOM-3FRI"): requires a single weekday
        # and a single week-of-month across all values.
        weekdays = unique(self.index.weekday)
        if len(weekdays) > 1:
            return None

        week_of_months = unique((self.index.day - 1) // 7)
        # Only attempt to infer up to WOM-4. See #9425
        week_of_months = week_of_months[week_of_months < 4]
        if len(week_of_months) == 0 or len(week_of_months) > 1:
            return None

        # get which week
        week = week_of_months[0] + 1
        wd = int_to_weekday[weekdays[0]]

        return f"WOM-{week}{wd}"
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
class _TimedeltaFrequencyInferer(_FrequencyInferer):
    """
    Frequency inferer for timedelta-valued indexes.

    Timedeltas carry no calendar structure, so only the uniform-delta
    (daily/weekly) rule can apply.
    """

    def _infer_daily_rule(self):
        if not self.is_unique:
            # Mixed deltas: no single frequency describes the index.
            return None
        return self._get_daily_rule()
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def _is_multiple(us, mult: int) -> bool:
|
| 418 |
+
return us % mult == 0
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def _maybe_add_count(base: str, count: float) -> str:
|
| 422 |
+
if count != 1:
|
| 423 |
+
assert count == int(count)
|
| 424 |
+
count = int(count)
|
| 425 |
+
return f"{count}{base}"
|
| 426 |
+
else:
|
| 427 |
+
return base
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
# ----------------------------------------------------------------------
|
| 431 |
+
# Frequency comparison
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
def is_subperiod(source, target) -> bool:
    """
    Returns True if downsampling is possible between source and target
    frequencies

    Parameters
    ----------
    source : str or DateOffset
        Frequency converting from
    target : str or DateOffset
        Frequency converting to

    Returns
    -------
    bool
    """
    if source is None or target is None:
        return False
    source = _maybe_coerce_freq(source)
    target = _maybe_coerce_freq(target)

    # Calendar-anchored targets first.
    if _is_annual(target):
        if _is_quarterly(source):
            # Quarterly nests inside annual only when anchor months agree.
            return _quarter_months_conform(
                get_rule_month(source), get_rule_month(target)
            )
        return source in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"}
    if _is_quarterly(target):
        return source in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"}
    if _is_monthly(target):
        return source in {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"}
    if _is_weekly(target):
        return source in {target, "D", "C", "B", "h", "min", "s", "ms", "us", "ns"}

    # Fixed-frequency targets: each maps to the set of frequencies that can
    # be downsampled into it.
    sub_freqs = {
        "B": {"B", "h", "min", "s", "ms", "us", "ns"},
        "C": {"C", "h", "min", "s", "ms", "us", "ns"},
        "D": {"D", "h", "min", "s", "ms", "us", "ns"},
        "h": {"h", "min", "s", "ms", "us", "ns"},
        "min": {"min", "s", "ms", "us", "ns"},
        "s": {"s", "ms", "us", "ns"},
        "ms": {"ms", "us", "ns"},
        "us": {"us", "ns"},
        "ns": {"ns"},
    }
    allowed = sub_freqs.get(target)
    return allowed is not None and source in allowed
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def is_superperiod(source, target) -> bool:
    """
    Returns True if upsampling is possible between source and target
    frequencies

    Parameters
    ----------
    source : str or DateOffset
        Frequency converting from
    target : str or DateOffset
        Frequency converting to

    Returns
    -------
    bool
    """
    if source is None or target is None:
        return False
    source = _maybe_coerce_freq(source)
    target = _maybe_coerce_freq(target)

    # Calendar-anchored sources first.
    if _is_annual(source):
        if _is_annual(target):
            return get_rule_month(source) == get_rule_month(target)
        if _is_quarterly(target):
            # Annual covers quarterly only when anchor months agree.
            return _quarter_months_conform(
                get_rule_month(source), get_rule_month(target)
            )
        return target in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"}
    if _is_quarterly(source):
        return target in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"}
    if _is_monthly(source):
        return target in {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"}
    if _is_weekly(source):
        return target in {source, "D", "C", "B", "h", "min", "s", "ms", "us", "ns"}

    # Fixed-frequency sources: each maps to the set of frequencies it can be
    # upsampled into.
    super_freqs = {
        "B": {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"},
        "C": {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"},
        "D": {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"},
        "h": {"h", "min", "s", "ms", "us", "ns"},
        "min": {"min", "s", "ms", "us", "ns"},
        "s": {"s", "ms", "us", "ns"},
        "ms": {"ms", "us", "ns"},
        "us": {"us", "ns"},
        "ns": {"ns"},
    }
    allowed = super_freqs.get(source)
    return allowed is not None and target in allowed
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def _maybe_coerce_freq(code) -> str:
|
| 548 |
+
"""we might need to coerce a code to a rule_code
|
| 549 |
+
and uppercase it
|
| 550 |
+
|
| 551 |
+
Parameters
|
| 552 |
+
----------
|
| 553 |
+
source : str or DateOffset
|
| 554 |
+
Frequency converting from
|
| 555 |
+
|
| 556 |
+
Returns
|
| 557 |
+
-------
|
| 558 |
+
str
|
| 559 |
+
"""
|
| 560 |
+
assert code is not None
|
| 561 |
+
if isinstance(code, DateOffset):
|
| 562 |
+
code = freq_to_period_freqstr(1, code.name)
|
| 563 |
+
if code in {"h", "min", "s", "ms", "us", "ns"}:
|
| 564 |
+
return code
|
| 565 |
+
else:
|
| 566 |
+
return code.upper()
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def _quarter_months_conform(source: str, target: str) -> bool:
|
| 570 |
+
snum = MONTH_NUMBERS[source]
|
| 571 |
+
tnum = MONTH_NUMBERS[target]
|
| 572 |
+
return snum % 3 == tnum % 3
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
def _is_annual(rule: str) -> bool:
|
| 576 |
+
rule = rule.upper()
|
| 577 |
+
return rule == "Y" or rule.startswith("Y-")
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
def _is_quarterly(rule: str) -> bool:
|
| 581 |
+
rule = rule.upper()
|
| 582 |
+
return rule == "Q" or rule.startswith(("Q-", "BQ"))
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
def _is_monthly(rule: str) -> bool:
|
| 586 |
+
rule = rule.upper()
|
| 587 |
+
return rule in ("M", "BM")
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
def _is_weekly(rule: str) -> bool:
|
| 591 |
+
rule = rule.upper()
|
| 592 |
+
return rule == "W" or rule.startswith("W-")
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
# Public API of this module when star-imported.
__all__ = [
    "Day",
    "get_period_alias",
    "infer_freq",
    "is_subperiod",
    "is_superperiod",
    "to_offset",
]
|
videollama2/lib/python3.10/site-packages/pandas/tseries/holiday.py
ADDED
|
@@ -0,0 +1,634 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from datetime import (
|
| 4 |
+
datetime,
|
| 5 |
+
timedelta,
|
| 6 |
+
)
|
| 7 |
+
import warnings
|
| 8 |
+
|
| 9 |
+
from dateutil.relativedelta import (
|
| 10 |
+
FR,
|
| 11 |
+
MO,
|
| 12 |
+
SA,
|
| 13 |
+
SU,
|
| 14 |
+
TH,
|
| 15 |
+
TU,
|
| 16 |
+
WE,
|
| 17 |
+
)
|
| 18 |
+
import numpy as np
|
| 19 |
+
|
| 20 |
+
from pandas.errors import PerformanceWarning
|
| 21 |
+
|
| 22 |
+
from pandas import (
|
| 23 |
+
DateOffset,
|
| 24 |
+
DatetimeIndex,
|
| 25 |
+
Series,
|
| 26 |
+
Timestamp,
|
| 27 |
+
concat,
|
| 28 |
+
date_range,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
from pandas.tseries.offsets import (
|
| 32 |
+
Day,
|
| 33 |
+
Easter,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def next_monday(dt: datetime) -> datetime:
    """
    If holiday falls on Saturday, use following Monday instead;
    if holiday falls on Sunday, use Monday instead
    """
    dow = dt.weekday()
    if dow == 5:  # Saturday
        return dt + timedelta(days=2)
    if dow == 6:  # Sunday
        return dt + timedelta(days=1)
    return dt
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def next_monday_or_tuesday(dt: datetime) -> datetime:
    """
    For second holiday of two adjacent ones!
    If holiday falls on Saturday, use following Monday instead;
    if holiday falls on Sunday or Monday, use following Tuesday instead
    (because Monday is already taken by adjacent holiday on the day before)
    """
    # Sat/Sun shift forward two days, Monday shifts one, others stay put.
    shift = {5: 2, 6: 2, 0: 1}.get(dt.weekday(), 0)
    if shift:
        return dt + timedelta(days=shift)
    return dt
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def previous_friday(dt: datetime) -> datetime:
    """
    If holiday falls on Saturday or Sunday, use previous Friday instead.
    """
    dow = dt.weekday()
    if dow == 5:  # Saturday -> back one day
        return dt - timedelta(days=1)
    if dow == 6:  # Sunday -> back two days
        return dt - timedelta(days=2)
    return dt
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def sunday_to_monday(dt: datetime) -> datetime:
    """
    If holiday falls on Sunday, use day thereafter (Monday) instead.
    """
    # weekday() == 6 is Sunday.
    return dt + timedelta(days=1) if dt.weekday() == 6 else dt
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def weekend_to_monday(dt: datetime) -> datetime:
    """
    If holiday falls on Sunday or Saturday,
    use day thereafter (Monday) instead.
    Needed for holidays such as Christmas observation in Europe
    """
    dow = dt.weekday()
    if dow == 5:  # Saturday -> Monday is two days ahead
        return dt + timedelta(days=2)
    if dow == 6:  # Sunday -> Monday is one day ahead
        return dt + timedelta(days=1)
    return dt
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def nearest_workday(dt: datetime) -> datetime:
    """
    If holiday falls on Saturday, use day before (Friday) instead;
    if holiday falls on Sunday, use day thereafter (Monday) instead.
    """
    dow = dt.weekday()
    if dow == 5:  # Saturday -> observe on Friday
        return dt - timedelta(days=1)
    if dow == 6:  # Sunday -> observe on Monday
        return dt + timedelta(days=1)
    return dt
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def next_workday(dt: datetime) -> datetime:
    """
    returns next weekday used for observances
    """
    candidate = dt + timedelta(days=1)
    # Mon-Fri are weekday() 0-4; keep stepping forward past the weekend.
    while candidate.weekday() > 4:
        candidate = candidate + timedelta(days=1)
    return candidate
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def previous_workday(dt: datetime) -> datetime:
    """
    returns previous weekday used for observances
    """
    candidate = dt - timedelta(days=1)
    # Mon-Fri are weekday() 0-4; keep stepping backward past the weekend.
    while candidate.weekday() > 4:
        candidate = candidate - timedelta(days=1)
    return candidate
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def before_nearest_workday(dt: datetime) -> datetime:
    """
    returns previous workday after nearest workday
    """
    observed = nearest_workday(dt)
    return previous_workday(observed)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def after_nearest_workday(dt: datetime) -> datetime:
    """
    returns next workday after nearest workday
    needed for Boxing day or multiple holidays in a series
    """
    observed = nearest_workday(dt)
    return next_workday(observed)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class Holiday:
|
| 147 |
+
"""
|
| 148 |
+
Class that defines a holiday with start/end dates and rules
|
| 149 |
+
for observance.
|
| 150 |
+
"""
|
| 151 |
+
|
| 152 |
+
start_date: Timestamp | None
|
| 153 |
+
end_date: Timestamp | None
|
| 154 |
+
days_of_week: tuple[int, ...] | None
|
| 155 |
+
|
| 156 |
+
def __init__(
|
| 157 |
+
self,
|
| 158 |
+
name: str,
|
| 159 |
+
year=None,
|
| 160 |
+
month=None,
|
| 161 |
+
day=None,
|
| 162 |
+
offset=None,
|
| 163 |
+
observance=None,
|
| 164 |
+
start_date=None,
|
| 165 |
+
end_date=None,
|
| 166 |
+
days_of_week=None,
|
| 167 |
+
) -> None:
|
| 168 |
+
"""
|
| 169 |
+
Parameters
|
| 170 |
+
----------
|
| 171 |
+
name : str
|
| 172 |
+
Name of the holiday , defaults to class name
|
| 173 |
+
offset : array of pandas.tseries.offsets or
|
| 174 |
+
class from pandas.tseries.offsets
|
| 175 |
+
computes offset from date
|
| 176 |
+
observance: function
|
| 177 |
+
computes when holiday is given a pandas Timestamp
|
| 178 |
+
days_of_week:
|
| 179 |
+
provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday
|
| 180 |
+
Monday=0,..,Sunday=6
|
| 181 |
+
|
| 182 |
+
Examples
|
| 183 |
+
--------
|
| 184 |
+
>>> from dateutil.relativedelta import MO
|
| 185 |
+
|
| 186 |
+
>>> USMemorialDay = pd.tseries.holiday.Holiday(
|
| 187 |
+
... "Memorial Day", month=5, day=31, offset=pd.DateOffset(weekday=MO(-1))
|
| 188 |
+
... )
|
| 189 |
+
>>> USMemorialDay
|
| 190 |
+
Holiday: Memorial Day (month=5, day=31, offset=<DateOffset: weekday=MO(-1)>)
|
| 191 |
+
|
| 192 |
+
>>> USLaborDay = pd.tseries.holiday.Holiday(
|
| 193 |
+
... "Labor Day", month=9, day=1, offset=pd.DateOffset(weekday=MO(1))
|
| 194 |
+
... )
|
| 195 |
+
>>> USLaborDay
|
| 196 |
+
Holiday: Labor Day (month=9, day=1, offset=<DateOffset: weekday=MO(+1)>)
|
| 197 |
+
|
| 198 |
+
>>> July3rd = pd.tseries.holiday.Holiday("July 3rd", month=7, day=3)
|
| 199 |
+
>>> July3rd
|
| 200 |
+
Holiday: July 3rd (month=7, day=3, )
|
| 201 |
+
|
| 202 |
+
>>> NewYears = pd.tseries.holiday.Holiday(
|
| 203 |
+
... "New Years Day", month=1, day=1,
|
| 204 |
+
... observance=pd.tseries.holiday.nearest_workday
|
| 205 |
+
... )
|
| 206 |
+
>>> NewYears # doctest: +SKIP
|
| 207 |
+
Holiday: New Years Day (
|
| 208 |
+
month=1, day=1, observance=<function nearest_workday at 0x66545e9bc440>
|
| 209 |
+
)
|
| 210 |
+
|
| 211 |
+
>>> July3rd = pd.tseries.holiday.Holiday(
|
| 212 |
+
... "July 3rd", month=7, day=3,
|
| 213 |
+
... days_of_week=(0, 1, 2, 3)
|
| 214 |
+
... )
|
| 215 |
+
>>> July3rd
|
| 216 |
+
Holiday: July 3rd (month=7, day=3, )
|
| 217 |
+
"""
|
| 218 |
+
if offset is not None and observance is not None:
|
| 219 |
+
raise NotImplementedError("Cannot use both offset and observance.")
|
| 220 |
+
|
| 221 |
+
self.name = name
|
| 222 |
+
self.year = year
|
| 223 |
+
self.month = month
|
| 224 |
+
self.day = day
|
| 225 |
+
self.offset = offset
|
| 226 |
+
self.start_date = (
|
| 227 |
+
Timestamp(start_date) if start_date is not None else start_date
|
| 228 |
+
)
|
| 229 |
+
self.end_date = Timestamp(end_date) if end_date is not None else end_date
|
| 230 |
+
self.observance = observance
|
| 231 |
+
assert days_of_week is None or type(days_of_week) == tuple
|
| 232 |
+
self.days_of_week = days_of_week
|
| 233 |
+
|
| 234 |
+
def __repr__(self) -> str:
|
| 235 |
+
info = ""
|
| 236 |
+
if self.year is not None:
|
| 237 |
+
info += f"year={self.year}, "
|
| 238 |
+
info += f"month={self.month}, day={self.day}, "
|
| 239 |
+
|
| 240 |
+
if self.offset is not None:
|
| 241 |
+
info += f"offset={self.offset}"
|
| 242 |
+
|
| 243 |
+
if self.observance is not None:
|
| 244 |
+
info += f"observance={self.observance}"
|
| 245 |
+
|
| 246 |
+
repr = f"Holiday: {self.name} ({info})"
|
| 247 |
+
return repr
|
| 248 |
+
|
| 249 |
+
def dates(
|
| 250 |
+
self, start_date, end_date, return_name: bool = False
|
| 251 |
+
) -> Series | DatetimeIndex:
|
| 252 |
+
"""
|
| 253 |
+
Calculate holidays observed between start date and end date
|
| 254 |
+
|
| 255 |
+
Parameters
|
| 256 |
+
----------
|
| 257 |
+
start_date : starting date, datetime-like, optional
|
| 258 |
+
end_date : ending date, datetime-like, optional
|
| 259 |
+
return_name : bool, optional, default=False
|
| 260 |
+
If True, return a series that has dates and holiday names.
|
| 261 |
+
False will only return dates.
|
| 262 |
+
|
| 263 |
+
Returns
|
| 264 |
+
-------
|
| 265 |
+
Series or DatetimeIndex
|
| 266 |
+
Series if return_name is True
|
| 267 |
+
"""
|
| 268 |
+
start_date = Timestamp(start_date)
|
| 269 |
+
end_date = Timestamp(end_date)
|
| 270 |
+
|
| 271 |
+
filter_start_date = start_date
|
| 272 |
+
filter_end_date = end_date
|
| 273 |
+
|
| 274 |
+
if self.year is not None:
|
| 275 |
+
dt = Timestamp(datetime(self.year, self.month, self.day))
|
| 276 |
+
dti = DatetimeIndex([dt])
|
| 277 |
+
if return_name:
|
| 278 |
+
return Series(self.name, index=dti)
|
| 279 |
+
else:
|
| 280 |
+
return dti
|
| 281 |
+
|
| 282 |
+
dates = self._reference_dates(start_date, end_date)
|
| 283 |
+
holiday_dates = self._apply_rule(dates)
|
| 284 |
+
if self.days_of_week is not None:
|
| 285 |
+
holiday_dates = holiday_dates[
|
| 286 |
+
np.isin(
|
| 287 |
+
# error: "DatetimeIndex" has no attribute "dayofweek"
|
| 288 |
+
holiday_dates.dayofweek, # type: ignore[attr-defined]
|
| 289 |
+
self.days_of_week,
|
| 290 |
+
).ravel()
|
| 291 |
+
]
|
| 292 |
+
|
| 293 |
+
if self.start_date is not None:
|
| 294 |
+
filter_start_date = max(
|
| 295 |
+
self.start_date.tz_localize(filter_start_date.tz), filter_start_date
|
| 296 |
+
)
|
| 297 |
+
if self.end_date is not None:
|
| 298 |
+
filter_end_date = min(
|
| 299 |
+
self.end_date.tz_localize(filter_end_date.tz), filter_end_date
|
| 300 |
+
)
|
| 301 |
+
holiday_dates = holiday_dates[
|
| 302 |
+
(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)
|
| 303 |
+
]
|
| 304 |
+
if return_name:
|
| 305 |
+
return Series(self.name, index=holiday_dates)
|
| 306 |
+
return holiday_dates
|
| 307 |
+
|
| 308 |
+
def _reference_dates(
|
| 309 |
+
self, start_date: Timestamp, end_date: Timestamp
|
| 310 |
+
) -> DatetimeIndex:
|
| 311 |
+
"""
|
| 312 |
+
Get reference dates for the holiday.
|
| 313 |
+
|
| 314 |
+
Return reference dates for the holiday also returning the year
|
| 315 |
+
prior to the start_date and year following the end_date. This ensures
|
| 316 |
+
that any offsets to be applied will yield the holidays within
|
| 317 |
+
the passed in dates.
|
| 318 |
+
"""
|
| 319 |
+
if self.start_date is not None:
|
| 320 |
+
start_date = self.start_date.tz_localize(start_date.tz)
|
| 321 |
+
|
| 322 |
+
if self.end_date is not None:
|
| 323 |
+
end_date = self.end_date.tz_localize(start_date.tz)
|
| 324 |
+
|
| 325 |
+
year_offset = DateOffset(years=1)
|
| 326 |
+
reference_start_date = Timestamp(
|
| 327 |
+
datetime(start_date.year - 1, self.month, self.day)
|
| 328 |
+
)
|
| 329 |
+
|
| 330 |
+
reference_end_date = Timestamp(
|
| 331 |
+
datetime(end_date.year + 1, self.month, self.day)
|
| 332 |
+
)
|
| 333 |
+
# Don't process unnecessary holidays
|
| 334 |
+
dates = date_range(
|
| 335 |
+
start=reference_start_date,
|
| 336 |
+
end=reference_end_date,
|
| 337 |
+
freq=year_offset,
|
| 338 |
+
tz=start_date.tz,
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
return dates
|
| 342 |
+
|
| 343 |
+
def _apply_rule(self, dates: DatetimeIndex) -> DatetimeIndex:
|
| 344 |
+
"""
|
| 345 |
+
Apply the given offset/observance to a DatetimeIndex of dates.
|
| 346 |
+
|
| 347 |
+
Parameters
|
| 348 |
+
----------
|
| 349 |
+
dates : DatetimeIndex
|
| 350 |
+
Dates to apply the given offset/observance rule
|
| 351 |
+
|
| 352 |
+
Returns
|
| 353 |
+
-------
|
| 354 |
+
Dates with rules applied
|
| 355 |
+
"""
|
| 356 |
+
if dates.empty:
|
| 357 |
+
return dates.copy()
|
| 358 |
+
|
| 359 |
+
if self.observance is not None:
|
| 360 |
+
return dates.map(lambda d: self.observance(d))
|
| 361 |
+
|
| 362 |
+
if self.offset is not None:
|
| 363 |
+
if not isinstance(self.offset, list):
|
| 364 |
+
offsets = [self.offset]
|
| 365 |
+
else:
|
| 366 |
+
offsets = self.offset
|
| 367 |
+
for offset in offsets:
|
| 368 |
+
# if we are adding a non-vectorized value
|
| 369 |
+
# ignore the PerformanceWarnings:
|
| 370 |
+
with warnings.catch_warnings():
|
| 371 |
+
warnings.simplefilter("ignore", PerformanceWarning)
|
| 372 |
+
dates += offset
|
| 373 |
+
return dates
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
holiday_calendars = {}
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def register(cls) -> None:
|
| 380 |
+
try:
|
| 381 |
+
name = cls.name
|
| 382 |
+
except AttributeError:
|
| 383 |
+
name = cls.__name__
|
| 384 |
+
holiday_calendars[name] = cls
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def get_calendar(name: str):
|
| 388 |
+
"""
|
| 389 |
+
Return an instance of a calendar based on its name.
|
| 390 |
+
|
| 391 |
+
Parameters
|
| 392 |
+
----------
|
| 393 |
+
name : str
|
| 394 |
+
Calendar name to return an instance of
|
| 395 |
+
"""
|
| 396 |
+
return holiday_calendars[name]()
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
class HolidayCalendarMetaClass(type):
|
| 400 |
+
def __new__(cls, clsname: str, bases, attrs):
|
| 401 |
+
calendar_class = super().__new__(cls, clsname, bases, attrs)
|
| 402 |
+
register(calendar_class)
|
| 403 |
+
return calendar_class
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass):
|
| 407 |
+
"""
|
| 408 |
+
Abstract interface to create holidays following certain rules.
|
| 409 |
+
"""
|
| 410 |
+
|
| 411 |
+
rules: list[Holiday] = []
|
| 412 |
+
start_date = Timestamp(datetime(1970, 1, 1))
|
| 413 |
+
end_date = Timestamp(datetime(2200, 12, 31))
|
| 414 |
+
_cache = None
|
| 415 |
+
|
| 416 |
+
def __init__(self, name: str = "", rules=None) -> None:
|
| 417 |
+
"""
|
| 418 |
+
Initializes holiday object with a given set a rules. Normally
|
| 419 |
+
classes just have the rules defined within them.
|
| 420 |
+
|
| 421 |
+
Parameters
|
| 422 |
+
----------
|
| 423 |
+
name : str
|
| 424 |
+
Name of the holiday calendar, defaults to class name
|
| 425 |
+
rules : array of Holiday objects
|
| 426 |
+
A set of rules used to create the holidays.
|
| 427 |
+
"""
|
| 428 |
+
super().__init__()
|
| 429 |
+
if not name:
|
| 430 |
+
name = type(self).__name__
|
| 431 |
+
self.name = name
|
| 432 |
+
|
| 433 |
+
if rules is not None:
|
| 434 |
+
self.rules = rules
|
| 435 |
+
|
| 436 |
+
def rule_from_name(self, name: str):
|
| 437 |
+
for rule in self.rules:
|
| 438 |
+
if rule.name == name:
|
| 439 |
+
return rule
|
| 440 |
+
|
| 441 |
+
return None
|
| 442 |
+
|
| 443 |
+
def holidays(self, start=None, end=None, return_name: bool = False):
|
| 444 |
+
"""
|
| 445 |
+
Returns a curve with holidays between start_date and end_date
|
| 446 |
+
|
| 447 |
+
Parameters
|
| 448 |
+
----------
|
| 449 |
+
start : starting date, datetime-like, optional
|
| 450 |
+
end : ending date, datetime-like, optional
|
| 451 |
+
return_name : bool, optional
|
| 452 |
+
If True, return a series that has dates and holiday names.
|
| 453 |
+
False will only return a DatetimeIndex of dates.
|
| 454 |
+
|
| 455 |
+
Returns
|
| 456 |
+
-------
|
| 457 |
+
DatetimeIndex of holidays
|
| 458 |
+
"""
|
| 459 |
+
if self.rules is None:
|
| 460 |
+
raise Exception(
|
| 461 |
+
f"Holiday Calendar {self.name} does not have any rules specified"
|
| 462 |
+
)
|
| 463 |
+
|
| 464 |
+
if start is None:
|
| 465 |
+
start = AbstractHolidayCalendar.start_date
|
| 466 |
+
|
| 467 |
+
if end is None:
|
| 468 |
+
end = AbstractHolidayCalendar.end_date
|
| 469 |
+
|
| 470 |
+
start = Timestamp(start)
|
| 471 |
+
end = Timestamp(end)
|
| 472 |
+
|
| 473 |
+
# If we don't have a cache or the dates are outside the prior cache, we
|
| 474 |
+
# get them again
|
| 475 |
+
if self._cache is None or start < self._cache[0] or end > self._cache[1]:
|
| 476 |
+
pre_holidays = [
|
| 477 |
+
rule.dates(start, end, return_name=True) for rule in self.rules
|
| 478 |
+
]
|
| 479 |
+
if pre_holidays:
|
| 480 |
+
# error: Argument 1 to "concat" has incompatible type
|
| 481 |
+
# "List[Union[Series, DatetimeIndex]]"; expected
|
| 482 |
+
# "Union[Iterable[DataFrame], Mapping[<nothing>, DataFrame]]"
|
| 483 |
+
holidays = concat(pre_holidays) # type: ignore[arg-type]
|
| 484 |
+
else:
|
| 485 |
+
# error: Incompatible types in assignment (expression has type
|
| 486 |
+
# "Series", variable has type "DataFrame")
|
| 487 |
+
holidays = Series(
|
| 488 |
+
index=DatetimeIndex([]), dtype=object
|
| 489 |
+
) # type: ignore[assignment]
|
| 490 |
+
|
| 491 |
+
self._cache = (start, end, holidays.sort_index())
|
| 492 |
+
|
| 493 |
+
holidays = self._cache[2]
|
| 494 |
+
holidays = holidays[start:end]
|
| 495 |
+
|
| 496 |
+
if return_name:
|
| 497 |
+
return holidays
|
| 498 |
+
else:
|
| 499 |
+
return holidays.index
|
| 500 |
+
|
| 501 |
+
@staticmethod
|
| 502 |
+
def merge_class(base, other):
|
| 503 |
+
"""
|
| 504 |
+
Merge holiday calendars together. The base calendar
|
| 505 |
+
will take precedence to other. The merge will be done
|
| 506 |
+
based on each holiday's name.
|
| 507 |
+
|
| 508 |
+
Parameters
|
| 509 |
+
----------
|
| 510 |
+
base : AbstractHolidayCalendar
|
| 511 |
+
instance/subclass or array of Holiday objects
|
| 512 |
+
other : AbstractHolidayCalendar
|
| 513 |
+
instance/subclass or array of Holiday objects
|
| 514 |
+
"""
|
| 515 |
+
try:
|
| 516 |
+
other = other.rules
|
| 517 |
+
except AttributeError:
|
| 518 |
+
pass
|
| 519 |
+
|
| 520 |
+
if not isinstance(other, list):
|
| 521 |
+
other = [other]
|
| 522 |
+
other_holidays = {holiday.name: holiday for holiday in other}
|
| 523 |
+
|
| 524 |
+
try:
|
| 525 |
+
base = base.rules
|
| 526 |
+
except AttributeError:
|
| 527 |
+
pass
|
| 528 |
+
|
| 529 |
+
if not isinstance(base, list):
|
| 530 |
+
base = [base]
|
| 531 |
+
base_holidays = {holiday.name: holiday for holiday in base}
|
| 532 |
+
|
| 533 |
+
other_holidays.update(base_holidays)
|
| 534 |
+
return list(other_holidays.values())
|
| 535 |
+
|
| 536 |
+
def merge(self, other, inplace: bool = False):
|
| 537 |
+
"""
|
| 538 |
+
Merge holiday calendars together. The caller's class
|
| 539 |
+
rules take precedence. The merge will be done
|
| 540 |
+
based on each holiday's name.
|
| 541 |
+
|
| 542 |
+
Parameters
|
| 543 |
+
----------
|
| 544 |
+
other : holiday calendar
|
| 545 |
+
inplace : bool (default=False)
|
| 546 |
+
If True set rule_table to holidays, else return array of Holidays
|
| 547 |
+
"""
|
| 548 |
+
holidays = self.merge_class(self, other)
|
| 549 |
+
if inplace:
|
| 550 |
+
self.rules = holidays
|
| 551 |
+
else:
|
| 552 |
+
return holidays
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
USMemorialDay = Holiday(
|
| 556 |
+
"Memorial Day", month=5, day=31, offset=DateOffset(weekday=MO(-1))
|
| 557 |
+
)
|
| 558 |
+
USLaborDay = Holiday("Labor Day", month=9, day=1, offset=DateOffset(weekday=MO(1)))
|
| 559 |
+
USColumbusDay = Holiday(
|
| 560 |
+
"Columbus Day", month=10, day=1, offset=DateOffset(weekday=MO(2))
|
| 561 |
+
)
|
| 562 |
+
USThanksgivingDay = Holiday(
|
| 563 |
+
"Thanksgiving Day", month=11, day=1, offset=DateOffset(weekday=TH(4))
|
| 564 |
+
)
|
| 565 |
+
USMartinLutherKingJr = Holiday(
|
| 566 |
+
"Birthday of Martin Luther King, Jr.",
|
| 567 |
+
start_date=datetime(1986, 1, 1),
|
| 568 |
+
month=1,
|
| 569 |
+
day=1,
|
| 570 |
+
offset=DateOffset(weekday=MO(3)),
|
| 571 |
+
)
|
| 572 |
+
USPresidentsDay = Holiday(
|
| 573 |
+
"Washington's Birthday", month=2, day=1, offset=DateOffset(weekday=MO(3))
|
| 574 |
+
)
|
| 575 |
+
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
|
| 576 |
+
|
| 577 |
+
EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
class USFederalHolidayCalendar(AbstractHolidayCalendar):
|
| 581 |
+
"""
|
| 582 |
+
US Federal Government Holiday Calendar based on rules specified by:
|
| 583 |
+
https://www.opm.gov/policy-data-oversight/pay-leave/federal-holidays/
|
| 584 |
+
"""
|
| 585 |
+
|
| 586 |
+
rules = [
|
| 587 |
+
Holiday("New Year's Day", month=1, day=1, observance=nearest_workday),
|
| 588 |
+
USMartinLutherKingJr,
|
| 589 |
+
USPresidentsDay,
|
| 590 |
+
USMemorialDay,
|
| 591 |
+
Holiday(
|
| 592 |
+
"Juneteenth National Independence Day",
|
| 593 |
+
month=6,
|
| 594 |
+
day=19,
|
| 595 |
+
start_date="2021-06-18",
|
| 596 |
+
observance=nearest_workday,
|
| 597 |
+
),
|
| 598 |
+
Holiday("Independence Day", month=7, day=4, observance=nearest_workday),
|
| 599 |
+
USLaborDay,
|
| 600 |
+
USColumbusDay,
|
| 601 |
+
Holiday("Veterans Day", month=11, day=11, observance=nearest_workday),
|
| 602 |
+
USThanksgivingDay,
|
| 603 |
+
Holiday("Christmas Day", month=12, day=25, observance=nearest_workday),
|
| 604 |
+
]
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
def HolidayCalendarFactory(name: str, base, other, base_class=AbstractHolidayCalendar):
|
| 608 |
+
rules = AbstractHolidayCalendar.merge_class(base, other)
|
| 609 |
+
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
|
| 610 |
+
return calendar_class
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
__all__ = [
|
| 614 |
+
"after_nearest_workday",
|
| 615 |
+
"before_nearest_workday",
|
| 616 |
+
"FR",
|
| 617 |
+
"get_calendar",
|
| 618 |
+
"HolidayCalendarFactory",
|
| 619 |
+
"MO",
|
| 620 |
+
"nearest_workday",
|
| 621 |
+
"next_monday",
|
| 622 |
+
"next_monday_or_tuesday",
|
| 623 |
+
"next_workday",
|
| 624 |
+
"previous_friday",
|
| 625 |
+
"previous_workday",
|
| 626 |
+
"register",
|
| 627 |
+
"SA",
|
| 628 |
+
"SU",
|
| 629 |
+
"sunday_to_monday",
|
| 630 |
+
"TH",
|
| 631 |
+
"TU",
|
| 632 |
+
"WE",
|
| 633 |
+
"weekend_to_monday",
|
| 634 |
+
]
|
videollama2/lib/python3.10/site-packages/pandas/tseries/offsets.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from pandas._libs.tslibs.offsets import (
|
| 4 |
+
FY5253,
|
| 5 |
+
BaseOffset,
|
| 6 |
+
BDay,
|
| 7 |
+
BMonthBegin,
|
| 8 |
+
BMonthEnd,
|
| 9 |
+
BQuarterBegin,
|
| 10 |
+
BQuarterEnd,
|
| 11 |
+
BusinessDay,
|
| 12 |
+
BusinessHour,
|
| 13 |
+
BusinessMonthBegin,
|
| 14 |
+
BusinessMonthEnd,
|
| 15 |
+
BYearBegin,
|
| 16 |
+
BYearEnd,
|
| 17 |
+
CBMonthBegin,
|
| 18 |
+
CBMonthEnd,
|
| 19 |
+
CDay,
|
| 20 |
+
CustomBusinessDay,
|
| 21 |
+
CustomBusinessHour,
|
| 22 |
+
CustomBusinessMonthBegin,
|
| 23 |
+
CustomBusinessMonthEnd,
|
| 24 |
+
DateOffset,
|
| 25 |
+
Day,
|
| 26 |
+
Easter,
|
| 27 |
+
FY5253Quarter,
|
| 28 |
+
Hour,
|
| 29 |
+
LastWeekOfMonth,
|
| 30 |
+
Micro,
|
| 31 |
+
Milli,
|
| 32 |
+
Minute,
|
| 33 |
+
MonthBegin,
|
| 34 |
+
MonthEnd,
|
| 35 |
+
Nano,
|
| 36 |
+
QuarterBegin,
|
| 37 |
+
QuarterEnd,
|
| 38 |
+
Second,
|
| 39 |
+
SemiMonthBegin,
|
| 40 |
+
SemiMonthEnd,
|
| 41 |
+
Tick,
|
| 42 |
+
Week,
|
| 43 |
+
WeekOfMonth,
|
| 44 |
+
YearBegin,
|
| 45 |
+
YearEnd,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
__all__ = [
|
| 49 |
+
"Day",
|
| 50 |
+
"BaseOffset",
|
| 51 |
+
"BusinessDay",
|
| 52 |
+
"BusinessMonthBegin",
|
| 53 |
+
"BusinessMonthEnd",
|
| 54 |
+
"BDay",
|
| 55 |
+
"CustomBusinessDay",
|
| 56 |
+
"CustomBusinessMonthBegin",
|
| 57 |
+
"CustomBusinessMonthEnd",
|
| 58 |
+
"CDay",
|
| 59 |
+
"CBMonthEnd",
|
| 60 |
+
"CBMonthBegin",
|
| 61 |
+
"MonthBegin",
|
| 62 |
+
"BMonthBegin",
|
| 63 |
+
"MonthEnd",
|
| 64 |
+
"BMonthEnd",
|
| 65 |
+
"SemiMonthEnd",
|
| 66 |
+
"SemiMonthBegin",
|
| 67 |
+
"BusinessHour",
|
| 68 |
+
"CustomBusinessHour",
|
| 69 |
+
"YearBegin",
|
| 70 |
+
"BYearBegin",
|
| 71 |
+
"YearEnd",
|
| 72 |
+
"BYearEnd",
|
| 73 |
+
"QuarterBegin",
|
| 74 |
+
"BQuarterBegin",
|
| 75 |
+
"QuarterEnd",
|
| 76 |
+
"BQuarterEnd",
|
| 77 |
+
"LastWeekOfMonth",
|
| 78 |
+
"FY5253Quarter",
|
| 79 |
+
"FY5253",
|
| 80 |
+
"Week",
|
| 81 |
+
"WeekOfMonth",
|
| 82 |
+
"Easter",
|
| 83 |
+
"Tick",
|
| 84 |
+
"Hour",
|
| 85 |
+
"Minute",
|
| 86 |
+
"Second",
|
| 87 |
+
"Milli",
|
| 88 |
+
"Micro",
|
| 89 |
+
"Nano",
|
| 90 |
+
"DateOffset",
|
| 91 |
+
]
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""OpenGL Extensions"""
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (193 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/pack_invert.cpython-310.pyc
ADDED
|
Binary file (1.62 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/resize_buffers.cpython-310.pyc
ADDED
|
Binary file (1.81 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/window_pos.cpython-310.pyc
ADDED
|
Binary file (2.34 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/__pycache__/ycbcr_texture.cpython-310.pyc
ADDED
|
Binary file (1.81 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/pack_invert.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension MESA.pack_invert
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.MESA.pack_invert to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension adds a new pixel storage parameter to indicate that
|
| 10 |
+
images are to be packed in top-to-bottom order instead of OpenGL's
|
| 11 |
+
conventional bottom-to-top order. Only pixel packing can be
|
| 12 |
+
inverted (i.e. for glReadPixels, glGetTexImage, glGetConvolutionFilter,
|
| 13 |
+
etc).
|
| 14 |
+
|
| 15 |
+
Almost all known image file formats store images in top-to-bottom
|
| 16 |
+
order. As it is, OpenGL reads images from the frame buffer in
|
| 17 |
+
bottom-to-top order. Thus, images usually have to be inverted before
|
| 18 |
+
writing them to a file with image I/O libraries. This extension
|
| 19 |
+
allows images to be read such that inverting isn't needed.
|
| 20 |
+
|
| 21 |
+
The official definition of this extension is available here:
|
| 22 |
+
http://www.opengl.org/registry/specs/MESA/pack_invert.txt
|
| 23 |
+
'''
|
| 24 |
+
from OpenGL import platform, constant, arrays
|
| 25 |
+
from OpenGL import extensions, wrapper
|
| 26 |
+
import ctypes
|
| 27 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 28 |
+
from OpenGL.raw.GL.MESA.pack_invert import *
|
| 29 |
+
from OpenGL.raw.GL.MESA.pack_invert import _EXTENSION_NAME
|
| 30 |
+
|
| 31 |
+
def glInitPackInvertMESA():
|
| 32 |
+
'''Return boolean indicating whether this extension is available'''
|
| 33 |
+
from OpenGL import extensions
|
| 34 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/resize_buffers.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension MESA.resize_buffers
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.MESA.resize_buffers to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
Mesa is often used as a client library with no integration with
|
| 10 |
+
the computer's window system (an X server, for example). And since
|
| 11 |
+
Mesa does not have an event loop nor window system callbacks, it
|
| 12 |
+
cannot properly respond to window system events. In particular,
|
| 13 |
+
Mesa cannot automatically detect when a window has been resized.
|
| 14 |
+
|
| 15 |
+
Mesa's glViewport command queries the current window size and updates
|
| 16 |
+
its internal data structors accordingly. This normally works fine
|
| 17 |
+
since most applications call glViewport in responce to window size
|
| 18 |
+
changes.
|
| 19 |
+
|
| 20 |
+
In some situations, however, the application may not call glViewport
|
| 21 |
+
when a window size changes but would still like Mesa to adjust to
|
| 22 |
+
the new window size. This extension exports a new function to solve
|
| 23 |
+
this problem.
|
| 24 |
+
|
| 25 |
+
The official definition of this extension is available here:
|
| 26 |
+
http://www.opengl.org/registry/specs/MESA/resize_buffers.txt
|
| 27 |
+
'''
|
| 28 |
+
from OpenGL import platform, constant, arrays
|
| 29 |
+
from OpenGL import extensions, wrapper
|
| 30 |
+
import ctypes
|
| 31 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 32 |
+
from OpenGL.raw.GL.MESA.resize_buffers import *
|
| 33 |
+
from OpenGL.raw.GL.MESA.resize_buffers import _EXTENSION_NAME
|
| 34 |
+
|
| 35 |
+
def glInitResizeBuffersMESA():
|
| 36 |
+
'''Return boolean indicating whether this extension is available'''
|
| 37 |
+
from OpenGL import extensions
|
| 38 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/window_pos.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension MESA.window_pos
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.MESA.window_pos to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
In order to set the current raster position to a specific window
|
| 10 |
+
coordinate with the RasterPos command, the modelview matrix, projection
|
| 11 |
+
matrix and viewport must be set very carefully. Furthermore, if the
|
| 12 |
+
desired window coordinate is outside of the window's bounds one must
|
| 13 |
+
rely on a subtle side-effect of the Bitmap command in order to circumvent
|
| 14 |
+
frustum clipping.
|
| 15 |
+
|
| 16 |
+
This extension provides a set of functions to directly set the
|
| 17 |
+
current raster position, bypassing the modelview matrix, the
|
| 18 |
+
projection matrix and the viewport to window mapping. Furthermore,
|
| 19 |
+
clip testing is not performed.
|
| 20 |
+
|
| 21 |
+
This greatly simplifies the process of setting the current raster
|
| 22 |
+
position to a specific window coordinate prior to calling DrawPixels,
|
| 23 |
+
CopyPixels or Bitmap.
|
| 24 |
+
|
| 25 |
+
The official definition of this extension is available here:
|
| 26 |
+
http://www.opengl.org/registry/specs/MESA/window_pos.txt
|
| 27 |
+
'''
|
| 28 |
+
from OpenGL import platform, constant, arrays
|
| 29 |
+
from OpenGL import extensions, wrapper
|
| 30 |
+
import ctypes
|
| 31 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 32 |
+
from OpenGL.raw.GL.MESA.window_pos import *
|
| 33 |
+
from OpenGL.raw.GL.MESA.window_pos import _EXTENSION_NAME
|
| 34 |
+
|
| 35 |
+
def glInitWindowPosMESA():
|
| 36 |
+
'''Return boolean indicating whether this extension is available'''
|
| 37 |
+
from OpenGL import extensions
|
| 38 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 39 |
+
|
| 40 |
+
glWindowPos2dvMESA=wrapper.wrapper(glWindowPos2dvMESA).setInputArraySize(
|
| 41 |
+
'v', 2
|
| 42 |
+
)
|
| 43 |
+
glWindowPos2fvMESA=wrapper.wrapper(glWindowPos2fvMESA).setInputArraySize(
|
| 44 |
+
'v', 2
|
| 45 |
+
)
|
| 46 |
+
glWindowPos2ivMESA=wrapper.wrapper(glWindowPos2ivMESA).setInputArraySize(
|
| 47 |
+
'v', 2
|
| 48 |
+
)
|
| 49 |
+
glWindowPos2svMESA=wrapper.wrapper(glWindowPos2svMESA).setInputArraySize(
|
| 50 |
+
'v', 2
|
| 51 |
+
)
|
| 52 |
+
glWindowPos3dvMESA=wrapper.wrapper(glWindowPos3dvMESA).setInputArraySize(
|
| 53 |
+
'v', 3
|
| 54 |
+
)
|
| 55 |
+
glWindowPos3fvMESA=wrapper.wrapper(glWindowPos3fvMESA).setInputArraySize(
|
| 56 |
+
'v', 3
|
| 57 |
+
)
|
| 58 |
+
glWindowPos3ivMESA=wrapper.wrapper(glWindowPos3ivMESA).setInputArraySize(
|
| 59 |
+
'v', 3
|
| 60 |
+
)
|
| 61 |
+
glWindowPos3svMESA=wrapper.wrapper(glWindowPos3svMESA).setInputArraySize(
|
| 62 |
+
'v', 3
|
| 63 |
+
)
|
| 64 |
+
glWindowPos4dvMESA=wrapper.wrapper(glWindowPos4dvMESA).setInputArraySize(
|
| 65 |
+
'v', 4
|
| 66 |
+
)
|
| 67 |
+
glWindowPos4fvMESA=wrapper.wrapper(glWindowPos4fvMESA).setInputArraySize(
|
| 68 |
+
'v', 4
|
| 69 |
+
)
|
| 70 |
+
glWindowPos4ivMESA=wrapper.wrapper(glWindowPos4ivMESA).setInputArraySize(
|
| 71 |
+
'v', 4
|
| 72 |
+
)
|
| 73 |
+
glWindowPos4svMESA=wrapper.wrapper(glWindowPos4svMESA).setInputArraySize(
|
| 74 |
+
'v', 4
|
| 75 |
+
)
|
| 76 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESA/ycbcr_texture.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension MESA.ycbcr_texture
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.MESA.ycbcr_texture to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension supports texture images stored in the YCbCr format.
|
| 10 |
+
There is no support for converting YCbCr images to RGB or vice versa
|
| 11 |
+
during pixel transfer. The texture's YCbCr colors are converted to
|
| 12 |
+
RGB during texture sampling, after-which, all the usual per-fragment
|
| 13 |
+
operations take place. Only 2D texture images are supported (not
|
| 14 |
+
glDrawPixels, glReadPixels, etc).
|
| 15 |
+
|
| 16 |
+
A YCbCr pixel (texel) is a 16-bit unsigned short with two components.
|
| 17 |
+
The first component is luminance (Y). For pixels in even-numbered
|
| 18 |
+
image columns, the second component is Cb. For pixels in odd-numbered
|
| 19 |
+
image columns, the second component is Cr. If one were to convert the
|
| 20 |
+
data to RGB one would need to examine two pixels from columns N and N+1
|
| 21 |
+
(where N is even) to deduce the RGB color.
|
| 22 |
+
|
| 23 |
+
The official definition of this extension is available here:
|
| 24 |
+
http://www.opengl.org/registry/specs/MESA/ycbcr_texture.txt
|
| 25 |
+
'''
|
| 26 |
+
from OpenGL import platform, constant, arrays
|
| 27 |
+
from OpenGL import extensions, wrapper
|
| 28 |
+
import ctypes
|
| 29 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 30 |
+
from OpenGL.raw.GL.MESA.ycbcr_texture import *
|
| 31 |
+
from OpenGL.raw.GL.MESA.ycbcr_texture import _EXTENSION_NAME
|
| 32 |
+
|
| 33 |
+
def glInitYcbcrTextureMESA():
|
| 34 |
+
'''Return boolean indicating whether this extension is available'''
|
| 35 |
+
from OpenGL import extensions
|
| 36 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/__init__.py
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (165 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/__pycache__/texture_stack.cpython-310.pyc
ADDED
|
Binary file (3.14 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/MESAX/texture_stack.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension MESAX.texture_stack
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.MESAX.texture_stack to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
There are a number of circumstances where an application may wish to
|
| 10 |
+
blend two textures out of a larger set of textures. Moreover, in some
|
| 11 |
+
cases the selected textures may vary on a per-fragment basis within
|
| 12 |
+
a polygon. Several examples include:
|
| 13 |
+
|
| 14 |
+
1. High dynamic range textures. The application stores several
|
| 15 |
+
different "exposures" of an image as different textures. On a
|
| 16 |
+
per-fragment basis, the application selects which exposures are
|
| 17 |
+
used.
|
| 18 |
+
|
| 19 |
+
2. A terrain engine where the altitude of a point determines the
|
| 20 |
+
texture applied to it. If the transition is from beach sand to
|
| 21 |
+
grass to rocks to snow, the application will store each texture
|
| 22 |
+
in a different texture map, and dynamically select which two
|
| 23 |
+
textures to blend at run-time.
|
| 24 |
+
|
| 25 |
+
3. Storing short video clips in textures. Each depth slice is a
|
| 26 |
+
single frame of video.
|
| 27 |
+
|
| 28 |
+
Several solutions to this problem have been proposed, but they either
|
| 29 |
+
involve using a separate texture unit for each texture map or using 3D
|
| 30 |
+
textures without mipmaps. Both of these options have major drawbacks.
|
| 31 |
+
|
| 32 |
+
This extension provides a third alternative that eliminates the major
|
| 33 |
+
drawbacks of both previous methods. A new texture target,
|
| 34 |
+
TEXTURE_2D_STACK, is added that functions identically to TEXTURE_3D in
|
| 35 |
+
all aspects except the sizes of the non-base level images. In
|
| 36 |
+
traditional 3D texturing, the size of the N+1 LOD is half the size
|
| 37 |
+
of the N LOD in all three dimensions. For the TEXTURE_2D_STACK target,
|
| 38 |
+
the height and width of the N+1 LOD is halved, but the depth is the
|
| 39 |
+
same for all levels of detail. The texture then becomes a "stack" of
|
| 40 |
+
2D textures. The per-fragment texel is selected by the R texture
|
| 41 |
+
coordinate.
|
| 42 |
+
|
| 43 |
+
References:
|
| 44 |
+
|
| 45 |
+
http://www.opengl.org/discussion_boards/cgi_directory/ultimatebb.cgi?ubb=get_topic;f=3;t=011557
|
| 46 |
+
http://www.opengl.org/discussion_boards/cgi_directory/ultimatebb.cgi?ubb=get_topic;f=3;t=000516
|
| 47 |
+
http://www.opengl.org/discussion_boards/cgi_directory/ultimatebb.cgi?ubb=get_topic;f=3;t=011903
|
| 48 |
+
http://www.delphi3d.net/articles/viewarticle.php?article=terraintex.htm
|
| 49 |
+
|
| 50 |
+
The official definition of this extension is available here:
|
| 51 |
+
http://www.opengl.org/registry/specs/MESAX/texture_stack.txt
|
| 52 |
+
'''
|
| 53 |
+
from OpenGL import platform, constant, arrays
|
| 54 |
+
from OpenGL import extensions, wrapper
|
| 55 |
+
import ctypes
|
| 56 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 57 |
+
from OpenGL.raw.GL.MESAX.texture_stack import *
|
| 58 |
+
from OpenGL.raw.GL.MESAX.texture_stack import _EXTENSION_NAME
|
| 59 |
+
|
| 60 |
+
def glInitTextureStackMESAX():
|
| 61 |
+
'''Return boolean indicating whether this extension is available'''
|
| 62 |
+
from OpenGL import extensions
|
| 63 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""OpenGL Extensions"""
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/depth_clamp.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.depth_clamp
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.depth_clamp to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
Conventional OpenGL clips geometric primitives to a clip volume
|
| 10 |
+
with six faces, two of which are the near and far clip planes.
|
| 11 |
+
Clipping to the near and far planes of the clip volume ensures that
|
| 12 |
+
interpolated depth values (after the depth range transform) must be
|
| 13 |
+
in the [0,1] range.
|
| 14 |
+
|
| 15 |
+
In some rendering applications such as shadow volumes, it is useful
|
| 16 |
+
to allow line and polygon primitives to be rasterized without
|
| 17 |
+
clipping the primitive to the near or far clip volume planes (side
|
| 18 |
+
clip volume planes clip normally). Without the near and far clip
|
| 19 |
+
planes, rasterization (pixel coverage determination) in X and Y
|
| 20 |
+
can proceed normally if we ignore the near and far clip planes.
|
| 21 |
+
The one major issue is that fragments of a primitive may extend
|
| 22 |
+
beyond the conventional window space depth range for depth values
|
| 23 |
+
(typically the range [0,1]). Rather than discarding fragments that
|
| 24 |
+
defy the window space depth range (effectively what near and far
|
| 25 |
+
plane clipping accomplish), the depth values can be clamped to the
|
| 26 |
+
current depth range.
|
| 27 |
+
|
| 28 |
+
This extension provides exactly such functionality. This
|
| 29 |
+
functionality is useful to obviate the need for near plane capping
|
| 30 |
+
of stenciled shadow volumes. The functionality may also be useful
|
| 31 |
+
for rendering geometry "beyond" the far plane if an alternative
|
| 32 |
+
algorithm (rather than depth testing) for hidden surface removal is
|
| 33 |
+
applied to such geometry (specifically, the painter's algorithm).
|
| 34 |
+
Similar situations at the near clip plane can be avoided at the
|
| 35 |
+
near clip plane where apparently solid objects can be "seen through"
|
| 36 |
+
if they intersect the near clip plane.
|
| 37 |
+
|
| 38 |
+
The official definition of this extension is available here:
|
| 39 |
+
http://www.opengl.org/registry/specs/NV/depth_clamp.txt
|
| 40 |
+
'''
|
| 41 |
+
from OpenGL import platform, constant, arrays
|
| 42 |
+
from OpenGL import extensions, wrapper
|
| 43 |
+
import ctypes
|
| 44 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 45 |
+
from OpenGL.raw.GL.NV.depth_clamp import *
|
| 46 |
+
from OpenGL.raw.GL.NV.depth_clamp import _EXTENSION_NAME
|
| 47 |
+
|
| 48 |
+
def glInitDepthClampNV():
|
| 49 |
+
'''Return boolean indicating whether this extension is available'''
|
| 50 |
+
from OpenGL import extensions
|
| 51 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/framebuffer_multisample_coverage.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.framebuffer_multisample_coverage
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.framebuffer_multisample_coverage to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension extends the EXT_framebuffer_multisample
|
| 10 |
+
specification by providing a new function,
|
| 11 |
+
RenderBufferStorageMultisampleCoverageNV, that distinguishes
|
| 12 |
+
between color samples and coverage samples.
|
| 13 |
+
|
| 14 |
+
EXT_framebuffer_multisample introduced the function
|
| 15 |
+
RenderbufferStorageMultisampleEXT as a method of defining the
|
| 16 |
+
storage parameters for a multisample render buffer. This function
|
| 17 |
+
takes a <samples> parameter. Using rules provided by the
|
| 18 |
+
specification, the <samples> parameter is resolved to an actual
|
| 19 |
+
number of samples that is supported by the underlying hardware.
|
| 20 |
+
EXT_framebuffer_multisample does not specify whether <samples>
|
| 21 |
+
refers to coverage samples or color samples.
|
| 22 |
+
|
| 23 |
+
This extension adds the function
|
| 24 |
+
RenderbufferStorageMultisamplCoverageNV, which takes a
|
| 25 |
+
<coverageSamples> parameter as well as a <colorSamples> parameter.
|
| 26 |
+
These two parameters give developers more fine grained control over
|
| 27 |
+
the quality of multisampled images.
|
| 28 |
+
|
| 29 |
+
The official definition of this extension is available here:
|
| 30 |
+
http://www.opengl.org/registry/specs/NV/framebuffer_multisample_coverage.txt
|
| 31 |
+
'''
|
| 32 |
+
from OpenGL import platform, constant, arrays
|
| 33 |
+
from OpenGL import extensions, wrapper
|
| 34 |
+
import ctypes
|
| 35 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 36 |
+
from OpenGL.raw.GL.NV.framebuffer_multisample_coverage import *
|
| 37 |
+
from OpenGL.raw.GL.NV.framebuffer_multisample_coverage import _EXTENSION_NAME
|
| 38 |
+
|
| 39 |
+
def glInitFramebufferMultisampleCoverageNV():
|
| 40 |
+
'''Return boolean indicating whether this extension is available'''
|
| 41 |
+
from OpenGL import extensions
|
| 42 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/gpu_program4.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.gpu_program4
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.gpu_program4 to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This specification documents the common instruction set and basic
|
| 10 |
+
functionality provided by NVIDIA's 4th generation of assembly instruction
|
| 11 |
+
sets supporting programmable graphics pipeline stages.
|
| 12 |
+
|
| 13 |
+
The instruction set builds upon the basic framework provided by the
|
| 14 |
+
ARB_vertex_program and ARB_fragment_program extensions to expose
|
| 15 |
+
considerably more capable hardware. In addition to new capabilities for
|
| 16 |
+
vertex and fragment programs, this extension provides a new program type
|
| 17 |
+
(geometry programs) further described in the NV_geometry_program4
|
| 18 |
+
specification.
|
| 19 |
+
|
| 20 |
+
NV_gpu_program4 provides a unified instruction set -- all instruction set
|
| 21 |
+
features are available for all program types, except for a small number of
|
| 22 |
+
features that make sense only for a specific program type. It provides
|
| 23 |
+
fully capable signed and unsigned integer data types, along with a set of
|
| 24 |
+
arithmetic, logical, and data type conversion instructions capable of
|
| 25 |
+
operating on integers. It also provides a uniform set of structured
|
| 26 |
+
branching constructs (if tests, loops, and subroutines) that fully support
|
| 27 |
+
run-time condition testing.
|
| 28 |
+
|
| 29 |
+
This extension provides several new texture mapping capabilities. Shadow
|
| 30 |
+
cube maps are supported, where cube map faces can encode depth values.
|
| 31 |
+
Texture lookup instructions can include an immediate texel offset, which
|
| 32 |
+
can assist in advanced filtering. New instructions are provided to fetch
|
| 33 |
+
a single texel by address in a texture map (TXF) and query the size of a
|
| 34 |
+
specified texture level (TXQ).
|
| 35 |
+
|
| 36 |
+
By and large, vertex and fragment programs written to ARB_vertex_program
|
| 37 |
+
and ARB_fragment_program can be ported directly by simply changing the
|
| 38 |
+
program header from "!!ARBvp1.0" or "!!ARBfp1.0" to "!!NVvp4.0" or
|
| 39 |
+
"!!NVfp4.0", and then modifying the code to take advantage of the expanded
|
| 40 |
+
feature set. There are a small number of areas where this extension is
|
| 41 |
+
not a functional superset of previous vertex program extensions, which are
|
| 42 |
+
documented in this specification.
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
The official definition of this extension is available here:
|
| 46 |
+
http://www.opengl.org/registry/specs/NV/gpu_program4.txt
|
| 47 |
+
'''
|
| 48 |
+
from OpenGL import platform, constant, arrays
|
| 49 |
+
from OpenGL import extensions, wrapper
|
| 50 |
+
import ctypes
|
| 51 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 52 |
+
from OpenGL.raw.GL.NV.gpu_program4 import *
|
| 53 |
+
from OpenGL.raw.GL.NV.gpu_program4 import _EXTENSION_NAME
|
| 54 |
+
|
| 55 |
+
def glInitGpuProgram4NV():
|
| 56 |
+
'''Return boolean indicating whether this extension is available'''
|
| 57 |
+
from OpenGL import extensions
|
| 58 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 59 |
+
|
| 60 |
+
glProgramLocalParameterI4ivNV=wrapper.wrapper(glProgramLocalParameterI4ivNV).setInputArraySize(
|
| 61 |
+
'params', 4
|
| 62 |
+
)
|
| 63 |
+
# INPUT glProgramLocalParametersI4ivNV.params size not checked against None
|
| 64 |
+
glProgramLocalParametersI4ivNV=wrapper.wrapper(glProgramLocalParametersI4ivNV).setInputArraySize(
|
| 65 |
+
'params', None
|
| 66 |
+
)
|
| 67 |
+
glProgramLocalParameterI4uivNV=wrapper.wrapper(glProgramLocalParameterI4uivNV).setInputArraySize(
|
| 68 |
+
'params', 4
|
| 69 |
+
)
|
| 70 |
+
# INPUT glProgramLocalParametersI4uivNV.params size not checked against None
|
| 71 |
+
glProgramLocalParametersI4uivNV=wrapper.wrapper(glProgramLocalParametersI4uivNV).setInputArraySize(
|
| 72 |
+
'params', None
|
| 73 |
+
)
|
| 74 |
+
glProgramEnvParameterI4ivNV=wrapper.wrapper(glProgramEnvParameterI4ivNV).setInputArraySize(
|
| 75 |
+
'params', 4
|
| 76 |
+
)
|
| 77 |
+
# INPUT glProgramEnvParametersI4ivNV.params size not checked against None
|
| 78 |
+
glProgramEnvParametersI4ivNV=wrapper.wrapper(glProgramEnvParametersI4ivNV).setInputArraySize(
|
| 79 |
+
'params', None
|
| 80 |
+
)
|
| 81 |
+
glProgramEnvParameterI4uivNV=wrapper.wrapper(glProgramEnvParameterI4uivNV).setInputArraySize(
|
| 82 |
+
'params', 4
|
| 83 |
+
)
|
| 84 |
+
# INPUT glProgramEnvParametersI4uivNV.params size not checked against None
|
| 85 |
+
glProgramEnvParametersI4uivNV=wrapper.wrapper(glProgramEnvParametersI4uivNV).setInputArraySize(
|
| 86 |
+
'params', None
|
| 87 |
+
)
|
| 88 |
+
glGetProgramLocalParameterIivNV=wrapper.wrapper(glGetProgramLocalParameterIivNV).setOutput(
|
| 89 |
+
'params',size=(4,),orPassIn=True
|
| 90 |
+
)
|
| 91 |
+
glGetProgramLocalParameterIuivNV=wrapper.wrapper(glGetProgramLocalParameterIuivNV).setOutput(
|
| 92 |
+
'params',size=(4,),orPassIn=True
|
| 93 |
+
)
|
| 94 |
+
glGetProgramEnvParameterIivNV=wrapper.wrapper(glGetProgramEnvParameterIivNV).setOutput(
|
| 95 |
+
'params',size=(4,),orPassIn=True
|
| 96 |
+
)
|
| 97 |
+
glGetProgramEnvParameterIuivNV=wrapper.wrapper(glGetProgramEnvParameterIuivNV).setOutput(
|
| 98 |
+
'params',size=(4,),orPassIn=True
|
| 99 |
+
)
|
| 100 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/gpu_program5_mem_extended.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.gpu_program5_mem_extended
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.gpu_program5_mem_extended to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension provides a new set of storage modifiers that can be used by
|
| 10 |
+
NV_gpu_program5 assembly program instructions loading from or storing to
|
| 11 |
+
various forms of GPU memory. In particular, we provide support for loads
|
| 12 |
+
and stores using the storage modifiers:
|
| 13 |
+
|
| 14 |
+
.F16X2 .F16X4 .F16 (for 16-bit floating-point scalars/vectors)
|
| 15 |
+
.S8X2 .S8X4 (for 8-bit signed integer vectors)
|
| 16 |
+
.S16X2 .S16X4 (for 16-bit signed integer vectors)
|
| 17 |
+
.U8X2 .U8X4 (for 8-bit unsigned integer vectors)
|
| 18 |
+
.U16X2 .U16X4 (for 16-bit unsigned integer vectors)
|
| 19 |
+
|
| 20 |
+
These modifiers are allowed for the following load/store instructions:
|
| 21 |
+
|
| 22 |
+
LDC Load from constant buffer
|
| 23 |
+
|
| 24 |
+
LOAD Global load
|
| 25 |
+
STORE Global store
|
| 26 |
+
|
| 27 |
+
LOADIM Image load (via EXT_shader_image_load_store)
|
| 28 |
+
STOREIM Image store (via EXT_shader_image_load_store)
|
| 29 |
+
|
| 30 |
+
LDB Load from storage buffer (via
|
| 31 |
+
NV_shader_storage_buffer_object)
|
| 32 |
+
STB Store to storage buffer (via
|
| 33 |
+
NV_shader_storage_buffer_object)
|
| 34 |
+
|
| 35 |
+
LDS Load from shared memory (via NV_compute_program5)
|
| 36 |
+
STS Store to shared memory (via NV_compute_program5)
|
| 37 |
+
|
| 38 |
+
For assembly programs prior to this extension, it was necessary to access
|
| 39 |
+
memory using packed types and then unpack with additional shader
|
| 40 |
+
instructions.
|
| 41 |
+
|
| 42 |
+
Similar capabilities have already been provided in the OpenGL Shading
|
| 43 |
+
Language (GLSL) via the NV_gpu_shader5 extension, using the extended data
|
| 44 |
+
types provided there (e.g., "float16_t", "u8vec4", "s16vec2").
|
| 45 |
+
|
| 46 |
+
The official definition of this extension is available here:
|
| 47 |
+
http://www.opengl.org/registry/specs/NV/gpu_program5_mem_extended.txt
|
| 48 |
+
'''
|
| 49 |
+
from OpenGL import platform, constant, arrays
|
| 50 |
+
from OpenGL import extensions, wrapper
|
| 51 |
+
import ctypes
|
| 52 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 53 |
+
from OpenGL.raw.GL.NV.gpu_program5_mem_extended import *
|
| 54 |
+
from OpenGL.raw.GL.NV.gpu_program5_mem_extended import _EXTENSION_NAME
|
| 55 |
+
|
| 56 |
+
def glInitGpuProgram5MemExtendedNV():
|
| 57 |
+
'''Return boolean indicating whether this extension is available'''
|
| 58 |
+
from OpenGL import extensions
|
| 59 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/occlusion_query.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.occlusion_query
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.occlusion_query to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
The HP_occlusion_test extension defines a mechanism whereby an
|
| 10 |
+
application can query the visibility of an object, where "visible"
|
| 11 |
+
means that at least one pixel passes the depth and stencil tests.
|
| 12 |
+
|
| 13 |
+
The HP extension has two major shortcomings.
|
| 14 |
+
|
| 15 |
+
- It returns the result as a simple GL_TRUE/GL_FALSE result, when in
|
| 16 |
+
fact it is often useful to know exactly how many pixels passed.
|
| 17 |
+
- It provides only a simple "stop-and-wait" model for using multiple
|
| 18 |
+
queries. The application begins an occlusion test and ends it;
|
| 19 |
+
then, at some later point, it asks for the result, at which point
|
| 20 |
+
the driver must stop and wait until the result from the previous
|
| 21 |
+
test is back before the application can even begin the next one.
|
| 22 |
+
This is a very simple model, but its performance is mediocre when
|
| 23 |
+
an application wishes to perform many queries, and it eliminates
|
| 24 |
+
most of the opportunites for parallelism between the CPU and GPU.
|
| 25 |
+
|
| 26 |
+
This extension solves both of those problems. It returns as its
|
| 27 |
+
result the number of pixels that pass, and it provides an interface
|
| 28 |
+
conceptually similar to that of NV_fence that allows applications to
|
| 29 |
+
issue many occlusion queries before asking for the result of any one.
|
| 30 |
+
As a result, they can overlap the time it takes for the occlusion
|
| 31 |
+
query results to be returned with other, more useful work, such as
|
| 32 |
+
rendering other parts of the scene or performing other computations
|
| 33 |
+
on the CPU.
|
| 34 |
+
|
| 35 |
+
There are many situations where a pixel count, rather than a boolean
|
| 36 |
+
result, is useful.
|
| 37 |
+
|
| 38 |
+
- If the visibility test is an object bounding box being used to
|
| 39 |
+
decide whether to skip the object, sometimes it can be acceptable,
|
| 40 |
+
and beneficial to performance, to skip an object if less than some
|
| 41 |
+
threshold number of pixels could be visible.
|
| 42 |
+
- Knowing the number of pixels visible in the bounding box may also
|
| 43 |
+
help decide what level of detail a model should be drawn with. If
|
| 44 |
+
only a few pixels are visible, a low-detail model may be
|
| 45 |
+
acceptable. In general, this allows level-of-detail mechanisms to
|
| 46 |
+
be slightly less ad hoc.
|
| 47 |
+
- "Depth peeling" techniques, such as order-independent transparency,
|
| 48 |
+
would typically like to know when to stop rendering more layers; it
|
| 49 |
+
is difficult to come up with a way to determine a priori how many
|
| 50 |
+
layers to use. A boolean count allows applications to stop when
|
| 51 |
+
more layers will not affect the image at all, but this will likely
|
| 52 |
+
be unacceptable for performance, with minimal gains to image
|
| 53 |
+
quality. Instead, it makes more sense to stop rendering when the
|
| 54 |
+
number of pixels goes below a threshold; this should provide better
|
| 55 |
+
results than any of these other algorithms.
|
| 56 |
+
- Occlusion queries can be used as a replacement for glReadPixels of
|
| 57 |
+
the depth buffer to determine whether, say, a light source is
|
| 58 |
+
visible for the purposes of a lens flare effect or a halo to
|
| 59 |
+
simulate glare. Pixel counts allow you to compute the percentage
|
| 60 |
+
of the light source that is visible, and the brightness of these
|
| 61 |
+
effects can be modulated accordingly.
|
| 62 |
+
|
| 63 |
+
The official definition of this extension is available here:
|
| 64 |
+
http://www.opengl.org/registry/specs/NV/occlusion_query.txt
|
| 65 |
+
'''
|
| 66 |
+
from OpenGL import platform, constant, arrays
|
| 67 |
+
from OpenGL import extensions, wrapper
|
| 68 |
+
import ctypes
|
| 69 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 70 |
+
from OpenGL.raw.GL.NV.occlusion_query import *
|
| 71 |
+
from OpenGL.raw.GL.NV.occlusion_query import _EXTENSION_NAME
|
| 72 |
+
|
| 73 |
+
def glInitOcclusionQueryNV():
|
| 74 |
+
'''Return boolean indicating whether this extension is available'''
|
| 75 |
+
from OpenGL import extensions
|
| 76 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 77 |
+
|
| 78 |
+
glGenOcclusionQueriesNV=wrapper.wrapper(glGenOcclusionQueriesNV).setOutput(
|
| 79 |
+
'ids',size=lambda x:(x,),pnameArg='n',orPassIn=True
|
| 80 |
+
)
|
| 81 |
+
# INPUT glDeleteOcclusionQueriesNV.ids size not checked against n
|
| 82 |
+
glDeleteOcclusionQueriesNV=wrapper.wrapper(glDeleteOcclusionQueriesNV).setInputArraySize(
|
| 83 |
+
'ids', None
|
| 84 |
+
)
|
| 85 |
+
glGetOcclusionQueryivNV=wrapper.wrapper(glGetOcclusionQueryivNV).setOutput(
|
| 86 |
+
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
|
| 87 |
+
)
|
| 88 |
+
glGetOcclusionQueryuivNV=wrapper.wrapper(glGetOcclusionQueryuivNV).setOutput(
|
| 89 |
+
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
|
| 90 |
+
)
|
| 91 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_thread_shuffle.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.shader_thread_shuffle
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.shader_thread_shuffle to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
The official definition of this extension is available here:
|
| 8 |
+
http://www.opengl.org/registry/specs/NV/shader_thread_shuffle.txt
|
| 9 |
+
'''
|
| 10 |
+
from OpenGL import platform, constant, arrays
|
| 11 |
+
from OpenGL import extensions, wrapper
|
| 12 |
+
import ctypes
|
| 13 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 14 |
+
from OpenGL.raw.GL.NV.shader_thread_shuffle import *
|
| 15 |
+
from OpenGL.raw.GL.NV.shader_thread_shuffle import _EXTENSION_NAME
|
| 16 |
+
|
| 17 |
+
def glInitShaderThreadShuffleNV():
|
| 18 |
+
'''Return boolean indicating whether this extension is available'''
|
| 19 |
+
from OpenGL import extensions
|
| 20 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_multisample.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.texture_multisample
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.texture_multisample to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This specification extends NV_gpu_program4 to support per-sample fetching
|
| 10 |
+
from multisample textures described in ARB_texture_multisample.
|
| 11 |
+
Specifically, it adds:
|
| 12 |
+
|
| 13 |
+
* The TXFMS sample fetch instruction.
|
| 14 |
+
|
| 15 |
+
* Texture targets corresponding to the multisample textures added by
|
| 16 |
+
ARB_texture_multisample.
|
| 17 |
+
|
| 18 |
+
* A program option to enable these features.
|
| 19 |
+
|
| 20 |
+
This specification also extends the ARB_texture_multisample extension
|
| 21 |
+
by adding support for EXT_direct_state_access and VCAA multisample
|
| 22 |
+
coverage with seperate <colorSamples> and <coverageSamples> parameters.
|
| 23 |
+
|
| 24 |
+
The official definition of this extension is available here:
|
| 25 |
+
http://www.opengl.org/registry/specs/NV/texture_multisample.txt
|
| 26 |
+
'''
|
| 27 |
+
from OpenGL import platform, constant, arrays
|
| 28 |
+
from OpenGL import extensions, wrapper
|
| 29 |
+
import ctypes
|
| 30 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 31 |
+
from OpenGL.raw.GL.NV.texture_multisample import *
|
| 32 |
+
from OpenGL.raw.GL.NV.texture_multisample import _EXTENSION_NAME
|
| 33 |
+
|
| 34 |
+
def glInitTextureMultisampleNV():
|
| 35 |
+
'''Return boolean indicating whether this extension is available'''
|
| 36 |
+
from OpenGL import extensions
|
| 37 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.texture_shader
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.texture_shader to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
Standard OpenGL and the ARB_multitexture extension define a
|
| 10 |
+
straightforward direct mechanism for mapping sets of texture
|
| 11 |
+
coordinates to filtered colors. This extension provides a more
|
| 12 |
+
functional mechanism.
|
| 13 |
+
|
| 14 |
+
OpenGL's standard texturing mechanism defines a set of texture
|
| 15 |
+
targets. Each texture target defines how the texture image
|
| 16 |
+
is specified and accessed via a set of texture coordinates.
|
| 17 |
+
OpenGL 1.0 defines the 1D and 2D texture targets. OpenGL 1.2
|
| 18 |
+
(and/or the EXT_texture3D extension) defines the 3D texture target.
|
| 19 |
+
The ARB_texture_cube_map extension defines the cube map texture
|
| 20 |
+
target. Each texture unit's texture coordinate set is mapped to a
|
| 21 |
+
color using the unit's highest priority enabled texture target.
|
| 22 |
+
|
| 23 |
+
This extension introduces texture shader stages. A sequence of
|
| 24 |
+
texture shader stages provides a more flexible mechanism for mapping
|
| 25 |
+
sets of texture coordinates to texture unit RGBA results than standard
|
| 26 |
+
OpenGL.
|
| 27 |
+
|
| 28 |
+
When the texture shader enable is on, the extension replaces the
|
| 29 |
+
conventional OpenGL mechanism for mapping sets of texture coordinates
|
| 30 |
+
to filtered colors with this extension's sequence of texture shader
|
| 31 |
+
stages.
|
| 32 |
+
|
| 33 |
+
Each texture shader stage runs one of 21 canned texture shader
|
| 34 |
+
programs. These programs support conventional OpenGL texture
|
| 35 |
+
mapping but also support dependent texture accesses, dot product
|
| 36 |
+
texture programs, and special modes. (3D texture mapping
|
| 37 |
+
texture shader operations are NOT provided by this extension;
|
| 38 |
+
3D texture mapping texture shader operations are added by the
|
| 39 |
+
NV_texture_shader2 extension that is layered on this extension.
|
| 40 |
+
See the NV_texture_shader2 specification.)
|
| 41 |
+
|
| 42 |
+
To facilitate the new texture shader programs, this extension
|
| 43 |
+
introduces several new texture formats and variations on existing
|
| 44 |
+
formats. Existing color texture formats are extended by introducing
|
| 45 |
+
new signed variants. Two new types of texture formats (beyond colors)
|
| 46 |
+
are also introduced. Texture offset groups encode two signed offsets,
|
| 47 |
+
and optionally a magnitude or a magnitude and an intensity. The new
|
| 48 |
+
HILO (pronounced high-low) formats provide possibly signed, high
|
| 49 |
+
precision (16-bit) two-component textures.
|
| 50 |
+
|
| 51 |
+
Each program takes as input the stage's interpolated texture
|
| 52 |
+
coordinate set (s,t,r,q). Each program generates two results:
|
| 53 |
+
a shader stage result that may be used as an input to subsequent
|
| 54 |
+
shader stage programs, and a texture unit RGBA result that becomes the
|
| 55 |
+
texture color used by the texture unit's texture environment function
|
| 56 |
+
or becomes the initial value for the corresponding texture register
|
| 57 |
+
for register combiners. The texture unit RGBA result is always
|
| 58 |
+
an RGBA color, but the shader stage result may be one of an RGBA
|
| 59 |
+
color, a HILO value, a texture offset group, a floating-point value,
|
| 60 |
+
or an invalid result. When both results are RGBA colors, the shader
|
| 61 |
+
stage result and the texture unit RGBA result are usually identical
|
| 62 |
+
(though not in all cases).
|
| 63 |
+
|
| 64 |
+
Additionally, certain programs have a side-effect such as culling
|
| 65 |
+
the fragment or replacing the fragment's depth value.
|
| 66 |
+
|
| 67 |
+
The twenty-one programs are briefly described:
|
| 68 |
+
|
| 69 |
+
<none>
|
| 70 |
+
|
| 71 |
+
1. NONE - Always generates a (0,0,0,0) texture unit RGBA result.
|
| 72 |
+
Equivalent to disabling all texture targets in conventional
|
| 73 |
+
OpenGL.
|
| 74 |
+
|
| 75 |
+
<conventional textures>
|
| 76 |
+
|
| 77 |
+
2. TEXTURE_1D - Accesses a 1D texture via (s/q).
|
| 78 |
+
|
| 79 |
+
3. TEXTURE_2D - Accesses a 2D texture via (s/q,t/q).
|
| 80 |
+
|
| 81 |
+
4. TEXTURE_RECTANGLE_NV - Accesses a rectangular texture via (s/q,t/q).
|
| 82 |
+
|
| 83 |
+
5. TEXTURE_CUBE_MAP_ARB - Accesses a cube map texture via (s,t,r).
|
| 84 |
+
|
| 85 |
+
<special modes>
|
| 86 |
+
|
| 87 |
+
6. PASS_THROUGH_NV - Converts a texture coordinate (s,t,r,q)
|
| 88 |
+
directly to a [0,1] clamped (r,g,b,a) texture unit RGBA result.
|
| 89 |
+
|
| 90 |
+
7. CULL_FRAGMENT_NV - Culls the fragment based on the whether each
|
| 91 |
+
(s,t,r,q) is "greater than or equal to zero" or "less than zero".
|
| 92 |
+
|
| 93 |
+
<offset textures>
|
| 94 |
+
|
| 95 |
+
8. OFFSET_TEXTURE_2D_NV - Transforms the signed (ds,dt) components
|
| 96 |
+
of a previous texture unit by a 2x2 floating-point matrix and
|
| 97 |
+
then uses the result to offset the stage's texture coordinates
|
| 98 |
+
for a 2D non-projective texture.
|
| 99 |
+
|
| 100 |
+
9. OFFSET_TEXTURE_2D_SCALE_NV - Same as above except the magnitude
|
| 101 |
+
component of the previous texture unit result scales the red,
|
| 102 |
+
green, and blue components of the unsigned RGBA texture 2D
|
| 103 |
+
access.
|
| 104 |
+
|
| 105 |
+
10. OFFSET_TEXTURE_RECTANGLE_NV - Similar to OFFSET_TEXTURE_2D_NV
|
| 106 |
+
except that the texture access is into a rectangular
|
| 107 |
+
non-projective texture.
|
| 108 |
+
|
| 109 |
+
11. OFFSET_TEXTURE_RECTANGLE_SCALE_NV - Similar to
|
| 110 |
+
OFFSET_TEXTURE_2D_SCALE_NV except that the texture access is
|
| 111 |
+
into a rectangular non-projective texture.
|
| 112 |
+
|
| 113 |
+
<dependent textures>
|
| 114 |
+
|
| 115 |
+
12. DEPENDENT_AR_TEXTURE_2D_NV - Converts the alpha and red
|
| 116 |
+
components of a previous shader result into an (s,t) texture
|
| 117 |
+
coordinate set to access a 2D non-projective texture.
|
| 118 |
+
|
| 119 |
+
13. DEPENDENT_GB_TEXTURE_2D_NV - Converts the green and blue
|
| 120 |
+
components of a previous shader result into an (s,t) texture
|
| 121 |
+
coordinate set to access a 2D non-projective texture.
|
| 122 |
+
|
| 123 |
+
<dot product textures>
|
| 124 |
+
|
| 125 |
+
14. DOT_PRODUCT_NV - Computes the dot product of the texture
|
| 126 |
+
shader's texture coordinate set (s,t,r) with some mapping of the
|
| 127 |
+
components of a previous texture shader result. The component
|
| 128 |
+
mapping depends on the type (RGBA or HILO) and signedness of
|
| 129 |
+
the stage's previous texture input. Other dot product texture
|
| 130 |
+
programs use the result of this program to compose a texture
|
| 131 |
+
coordinate set for a dependent texture access. The color result
|
| 132 |
+
is undefined.
|
| 133 |
+
|
| 134 |
+
15. DOT_PRODUCT_TEXTURE_2D_NV - When preceded by a DOT_PRODUCT_NV
|
| 135 |
+
program in the previous texture shader stage, computes a second
|
| 136 |
+
similar dot product and composes the two dot products into (s,t)
|
| 137 |
+
texture coordinate set to access a 2D non-projective texture.
|
| 138 |
+
|
| 139 |
+
16. DOT_PRODUCT_TEXTURE_RECTANGLE_NV - Similar to
|
| 140 |
+
DOT_PRODUCT_TEXTURE_2D_NV except that the texture access is into
|
| 141 |
+
a rectangular non-projective texture.
|
| 142 |
+
|
| 143 |
+
17. DOT_PRODUCT_TEXTURE_CUBE_MAP_NV - When preceded by two
|
| 144 |
+
DOT_PRODUCT_NV programs in the previous two texture shader
|
| 145 |
+
stages, computes a third similar dot product and composes the
|
| 146 |
+
three dot products into (s,t,r) texture coordinate set to access
|
| 147 |
+
a cube map texture.
|
| 148 |
+
|
| 149 |
+
18. DOT_PRODUCT_REFLECT_CUBE_MAP_NV - When preceded by two
|
| 150 |
+
DOT_PRODUCT_NV programs in the previous two texture shader
|
| 151 |
+
stages, computes a third similar dot product and composes the
|
| 152 |
+
three dot products into a normal vector (Nx,Ny,Nz). An eye
|
| 153 |
+
vector (Ex,Ey,Ez) is composed from the q texture coordinates of
|
| 154 |
+
the three stages. A reflection vector (Rx,Ry,Rz) is computed
|
| 155 |
+
based on the normal and eye vectors. The reflection vector
|
| 156 |
+
forms an (s,t,r) texture coordinate set to access a cube map
|
| 157 |
+
texture.
|
| 158 |
+
|
| 159 |
+
19. DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV - Operates like
|
| 160 |
+
DOT_PRODUCT_REFLECT_CUBE_MAP_NV except that the eye vector
|
| 161 |
+
(Ex,Ey,Ez) is a user-defined constant rather than composed from
|
| 162 |
+
the q coordinates of the three stages.
|
| 163 |
+
|
| 164 |
+
20. DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV - When used instead of the second
|
| 165 |
+
DOT_PRODUCT_NV program preceding
|
| 166 |
+
a DOT_PRODUCT_REFLECT_CUBE_MAP_NV or
|
| 167 |
+
DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV stage, the normal
|
| 168 |
+
vector forms an (s,t,r) texture coordinate set to access a
|
| 169 |
+
cube map texture.
|
| 170 |
+
|
| 171 |
+
<dot product depth replace>
|
| 172 |
+
|
| 173 |
+
21. DOT_PRODUCT_DEPTH_REPLACE_NV - When preceded by a DOT_PRODUCT_NV
|
| 174 |
+
program in the previous texture shader stage, computes a second
|
| 175 |
+
similar dot product and replaces the fragment's window-space
|
| 176 |
+
depth value with the first dot product results divided by
|
| 177 |
+
the second. The texture unit RGBA result is (0,0,0,0).
|
| 178 |
+
|
| 179 |
+
The official definition of this extension is available here:
|
| 180 |
+
http://www.opengl.org/registry/specs/NV/texture_shader.txt
|
| 181 |
+
'''
|
| 182 |
+
from OpenGL import platform, constant, arrays
|
| 183 |
+
from OpenGL import extensions, wrapper
|
| 184 |
+
import ctypes
|
| 185 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 186 |
+
from OpenGL.raw.GL.NV.texture_shader import *
|
| 187 |
+
from OpenGL.raw.GL.NV.texture_shader import _EXTENSION_NAME
|
| 188 |
+
|
| 189 |
+
def glInitTextureShaderNV():
    """Return True when the NV_texture_shader extension is exposed by the runtime.

    Availability is resolved dynamically, so this should be called after a
    context exists rather than at module import time.
    """
    # Local import mirrors the autogenerated pattern used across these modules.
    from OpenGL import extensions as _extensions
    return _extensions.hasGLExtension(_EXTENSION_NAME)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
### END AUTOGENERATED SECTION
# NOTE(review): legacy spellings with a "2D" infix, re-exported alongside the
# canonical token names from the raw module — presumably kept for backwards
# compatibility with older client code; confirm against PyOpenGL history.
GL_OFFSET_TEXTURE_2D_BIAS_NV = GL_OFFSET_TEXTURE_BIAS_NV # alias
GL_OFFSET_TEXTURE_2D_MATRIX_NV = GL_OFFSET_TEXTURE_MATRIX_NV # alias
GL_OFFSET_TEXTURE_2D_SCALE_NV = GL_OFFSET_TEXTURE_SCALE_NV # alias
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vdpau_interop.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.vdpau_interop
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.vdpau_interop to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension allows VDPAU video and output surfaces to be used
|
| 10 |
+
for texturing and rendering.
|
| 11 |
+
|
| 12 |
+
This allows the GL to process and display the content of video
|
| 13 |
+
streams decoded using VDPAU.
|
| 14 |
+
|
| 15 |
+
Alternatively, the GL may modify VDPAU surfaces in-place, and VDPAU
|
| 16 |
+
may then process and/or display those surfaces itself.
|
| 17 |
+
|
| 18 |
+
This allows the GL to be used to combine application user-interface
|
| 19 |
+
elements with decoded video, implement custom video-processing
|
| 20 |
+
algorithms, etc.
|
| 21 |
+
|
| 22 |
+
The official definition of this extension is available here:
|
| 23 |
+
http://www.opengl.org/registry/specs/NV/vdpau_interop.txt
|
| 24 |
+
'''
|
| 25 |
+
from OpenGL import platform, constant, arrays
|
| 26 |
+
from OpenGL import extensions, wrapper
|
| 27 |
+
import ctypes
|
| 28 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 29 |
+
from OpenGL.raw.GL.NV.vdpau_interop import *
|
| 30 |
+
from OpenGL.raw.GL.NV.vdpau_interop import _EXTENSION_NAME
|
| 31 |
+
|
| 32 |
+
def glInitVdpauInteropNV():
    """Report whether the NV_vdpau_interop extension is available (boolean)."""
    # Deferred import, matching the convention of the other glInit* helpers.
    from OpenGL import extensions as _ext
    return _ext.hasGLExtension(_EXTENSION_NAME)
|
| 36 |
+
|
| 37 |
+
# Autogenerated wrapper configuration: each raw entry point is re-bound through
# wrapper.wrapper(...) so that Python sequences can be passed for pointer-typed
# array parameters.  setInputArraySize(name, None) registers *name* as an input
# array whose length is NOT validated against its companion count parameter
# (the generator could not infer the relationship — see the comments it left).
# glVDPAURegisterVideoSurfaceNV.vdpSurface is OUTPUT without known output size
# INPUT glVDPAURegisterVideoSurfaceNV.textureNames size not checked against numTextureNames
glVDPAURegisterVideoSurfaceNV=wrapper.wrapper(glVDPAURegisterVideoSurfaceNV).setInputArraySize(
    'textureNames', None
)
# glVDPAURegisterOutputSurfaceNV.vdpSurface is OUTPUT without known output size
# INPUT glVDPAURegisterOutputSurfaceNV.textureNames size not checked against numTextureNames
glVDPAURegisterOutputSurfaceNV=wrapper.wrapper(glVDPAURegisterOutputSurfaceNV).setInputArraySize(
    'textureNames', None
)
# glVDPAUGetSurfaceivNV.length is OUTPUT without known output size
# 'values' is allocated for the caller, sized from the bufSize argument;
# orPassIn=True lets callers still supply their own output buffer.
# NOTE(review): the 'length' output parameter is left unwrapped here — callers
# apparently must pass it explicitly; confirm against PyOpenGL usage.
glVDPAUGetSurfaceivNV=wrapper.wrapper(glVDPAUGetSurfaceivNV).setOutput(
    'values',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
# INPUT glVDPAUMapSurfacesNV.surfaces size not checked against numSurfaces
glVDPAUMapSurfacesNV=wrapper.wrapper(glVDPAUMapSurfacesNV).setInputArraySize(
    'surfaces', None
)
# INPUT glVDPAUUnmapSurfacesNV.surfaces size not checked against numSurface
glVDPAUUnmapSurfacesNV=wrapper.wrapper(glVDPAUUnmapSurfacesNV).setInputArraySize(
    'surfaces', None
)
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_program1_1.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.vertex_program1_1
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.vertex_program1_1 to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension adds four new vertex program instructions (DPH,
|
| 10 |
+
RCC, SUB, and ABS).
|
| 11 |
+
|
| 12 |
+
This extension also supports a position-invariant vertex program
|
| 13 |
+
option. A vertex program is position-invariant when it generates
|
| 14 |
+
the _exact_ same homogeneous position and window space position
|
| 15 |
+
for a vertex as conventional OpenGL transformation (ignoring vertex
|
| 16 |
+
blending and weighting).
|
| 17 |
+
|
| 18 |
+
By default, vertex programs are _not_ guaranteed to be
|
| 19 |
+
position-invariant because there is no guarantee made that the way
|
| 20 |
+
a vertex program might compute its homogeneous position is exactly
|
| 21 |
+
identical to the way conventional OpenGL transformation computes
|
| 22 |
+
its homogeneous positions. In a position-invariant vertex program,
|
| 23 |
+
the homogeneous position (HPOS) is not output by the program.
|
| 24 |
+
Instead, the OpenGL implementation is expected to compute the HPOS
|
| 25 |
+
for position-invariant vertex programs in a manner exactly identical
|
| 26 |
+
to how the homogeneous position and window position are computed
|
| 27 |
+
for a vertex by conventional OpenGL transformation. In this way
|
| 28 |
+
position-invariant vertex programs guarantee correct multi-pass
|
| 29 |
+
rendering semantics in cases where multiple passes are rendered and
|
| 30 |
+
the second and subsequent passes use a GL_EQUAL depth test.
|
| 31 |
+
|
| 32 |
+
The official definition of this extension is available here:
|
| 33 |
+
http://www.opengl.org/registry/specs/NV/vertex_program1_1.txt
|
| 34 |
+
'''
|
| 35 |
+
from OpenGL import platform, constant, arrays
|
| 36 |
+
from OpenGL import extensions, wrapper
|
| 37 |
+
import ctypes
|
| 38 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 39 |
+
from OpenGL.raw.GL.NV.vertex_program1_1 import *
|
| 40 |
+
from OpenGL.raw.GL.NV.vertex_program1_1 import _EXTENSION_NAME
|
| 41 |
+
|
| 42 |
+
def glInitVertexProgram11NV():
    """Boolean check for runtime availability of NV_vertex_program1_1."""
    from OpenGL import extensions as _extensions  # deferred, per module convention
    return _extensions.hasGLExtension(_EXTENSION_NAME)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_program4.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension NV.vertex_program4
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.NV.vertex_program4 to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension builds on the common assembly instruction set
|
| 10 |
+
infrastructure provided by NV_gpu_program4, adding vertex program-specific
|
| 11 |
+
features.
|
| 12 |
+
|
| 13 |
+
This extension provides the ability to specify integer vertex attributes
|
| 14 |
+
that are passed to vertex programs using integer data types, rather than
|
| 15 |
+
being converted to floating-point values as in existing vertex attribute
|
| 16 |
+
functions. The set of input and output bindings provided includes all
|
| 17 |
+
bindings supported by ARB_vertex_program. This extension provides
|
| 18 |
+
additional input bindings identifying the index of the vertex when vertex
|
| 19 |
+
arrays are used ("vertex.id") and the instance number when instanced
|
| 20 |
+
arrays are used ("vertex.instance", requires EXT_draw_instanced). It
|
| 21 |
+
also provides output bindings allowing vertex programs to directly specify
|
| 22 |
+
clip distances (for user clipping) plus a set of generic attributes that
|
| 23 |
+
allow programs to pass a greater number of attributes to subsequent
|
| 24 |
+
pipeline stages than is possible using only the pre-defined fixed-function
|
| 25 |
+
vertex outputs.
|
| 26 |
+
|
| 27 |
+
By and large, programs written to ARB_vertex_program can be ported
|
| 28 |
+
directly by simply changing the program header from "!!ARBvp1.0" to
|
| 29 |
+
"!!NVvp4.0", and then modifying instructions to take advantage of the
|
| 30 |
+
expanded feature set. There are a small number of areas where this
|
| 31 |
+
extension is not a functional superset of previous vertex program
|
| 32 |
+
extensions, which are documented in the NV_gpu_program4 specification.
|
| 33 |
+
|
| 34 |
+
The official definition of this extension is available here:
|
| 35 |
+
http://www.opengl.org/registry/specs/NV/vertex_program4.txt
|
| 36 |
+
'''
|
| 37 |
+
from OpenGL import platform, constant, arrays
|
| 38 |
+
from OpenGL import extensions, wrapper
|
| 39 |
+
import ctypes
|
| 40 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 41 |
+
from OpenGL.raw.GL.NV.vertex_program4 import *
|
| 42 |
+
from OpenGL.raw.GL.NV.vertex_program4 import _EXTENSION_NAME
|
| 43 |
+
|
| 44 |
+
def glInitVertexProgram4NV():
    """Return whether the NV_vertex_program4 extension can be used.

    True when the extension string advertises it, False otherwise.
    """
    from OpenGL import extensions as _ext
    return _ext.hasGLExtension(_EXTENSION_NAME)
|
| 48 |
+
|
| 49 |
+
# Autogenerated wrapper configuration for the integer vertex-attribute entry
# points.  setInputArraySize('v', N) declares the 'v' pointer parameter as an
# input array of exactly N elements, so Python sequences of that length can be
# passed directly; the N values follow the function-name suffix (I1..I4).
glVertexAttribI1ivEXT=wrapper.wrapper(glVertexAttribI1ivEXT).setInputArraySize(
    'v', 1
)
glVertexAttribI2ivEXT=wrapper.wrapper(glVertexAttribI2ivEXT).setInputArraySize(
    'v', 2
)
glVertexAttribI3ivEXT=wrapper.wrapper(glVertexAttribI3ivEXT).setInputArraySize(
    'v', 3
)
glVertexAttribI4ivEXT=wrapper.wrapper(glVertexAttribI4ivEXT).setInputArraySize(
    'v', 4
)
glVertexAttribI1uivEXT=wrapper.wrapper(glVertexAttribI1uivEXT).setInputArraySize(
    'v', 1
)
glVertexAttribI2uivEXT=wrapper.wrapper(glVertexAttribI2uivEXT).setInputArraySize(
    'v', 2
)
glVertexAttribI3uivEXT=wrapper.wrapper(glVertexAttribI3uivEXT).setInputArraySize(
    'v', 3
)
glVertexAttribI4uivEXT=wrapper.wrapper(glVertexAttribI4uivEXT).setInputArraySize(
    'v', 4
)
glVertexAttribI4bvEXT=wrapper.wrapper(glVertexAttribI4bvEXT).setInputArraySize(
    'v', 4
)
glVertexAttribI4svEXT=wrapper.wrapper(glVertexAttribI4svEXT).setInputArraySize(
    'v', 4
)
glVertexAttribI4ubvEXT=wrapper.wrapper(glVertexAttribI4ubvEXT).setInputArraySize(
    'v', 4
)
glVertexAttribI4usvEXT=wrapper.wrapper(glVertexAttribI4usvEXT).setInputArraySize(
    'v', 4
)
# INPUT glVertexAttribIPointerEXT.pointer size not checked against 'size,type,stride'
# 'pointer' length cannot be derived by the generator, so no validation (None).
glVertexAttribIPointerEXT=wrapper.wrapper(glVertexAttribIPointerEXT).setInputArraySize(
    'pointer', None
)
# The glGet* queries write a single integer result; setOutput allocates a
# one-element 'params' array for the caller (orPassIn=True still allows an
# explicit output buffer to be supplied).
glGetVertexAttribIivEXT=wrapper.wrapper(glGetVertexAttribIivEXT).setOutput(
    'params',size=(1,),orPassIn=True
)
glGetVertexAttribIuivEXT=wrapper.wrapper(glGetVertexAttribIuivEXT).setOutput(
    'params',size=(1,),orPassIn=True
)
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/OES/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (192 Bytes). View file
|
|
|