Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/__init__.py +43 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py +84 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py +547 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py +207 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/_utils.py +63 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py +2556 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/integer.py +272 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/masked.py +1650 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/numeric.py +286 -0
- videollama2/lib/python3.10/site-packages/pandas/core/arrays/period.py +1313 -0
- videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__init__.py +1 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/draw_buffers.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/envmap_bumpmap.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/fragment_shader.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/pn_triangles.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/texture_float.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/texture_mirror_once.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/vertex_attrib_array_object.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/draw_buffers.py +48 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/envmap_bumpmap.py +54 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/fragment_shader.py +46 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/map_object_buffer.py +31 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/meminfo.py +33 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/pixel_format_float.py +41 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/pn_triangles.py +38 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/separate_stencil.py +29 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/text_fragment_shader.py +84 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/texture_env_combine3.py +44 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/texture_float.py +33 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/texture_mirror_once.py +36 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/vertex_array_object.py +56 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/vertex_attrib_array_object.py +36 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/vertex_streams.py +96 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/KHR/__pycache__/texture_compression_astc_ldr.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/multisample.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/pixel_texture.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/point_line_texgen.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/sharpen_texture.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture4D.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture_border_clamp.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture_color_mask.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture_edge_clamp.cpython-310.pyc +0 -0
videollama2/lib/python3.10/site-packages/pandas/core/arrays/__init__.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pandas.core.arrays.arrow import ArrowExtensionArray
|
| 2 |
+
from pandas.core.arrays.base import (
|
| 3 |
+
ExtensionArray,
|
| 4 |
+
ExtensionOpsMixin,
|
| 5 |
+
ExtensionScalarOpsMixin,
|
| 6 |
+
)
|
| 7 |
+
from pandas.core.arrays.boolean import BooleanArray
|
| 8 |
+
from pandas.core.arrays.categorical import Categorical
|
| 9 |
+
from pandas.core.arrays.datetimes import DatetimeArray
|
| 10 |
+
from pandas.core.arrays.floating import FloatingArray
|
| 11 |
+
from pandas.core.arrays.integer import IntegerArray
|
| 12 |
+
from pandas.core.arrays.interval import IntervalArray
|
| 13 |
+
from pandas.core.arrays.masked import BaseMaskedArray
|
| 14 |
+
from pandas.core.arrays.numpy_ import NumpyExtensionArray
|
| 15 |
+
from pandas.core.arrays.period import (
|
| 16 |
+
PeriodArray,
|
| 17 |
+
period_array,
|
| 18 |
+
)
|
| 19 |
+
from pandas.core.arrays.sparse import SparseArray
|
| 20 |
+
from pandas.core.arrays.string_ import StringArray
|
| 21 |
+
from pandas.core.arrays.string_arrow import ArrowStringArray
|
| 22 |
+
from pandas.core.arrays.timedeltas import TimedeltaArray
|
| 23 |
+
|
| 24 |
+
__all__ = [
|
| 25 |
+
"ArrowExtensionArray",
|
| 26 |
+
"ExtensionArray",
|
| 27 |
+
"ExtensionOpsMixin",
|
| 28 |
+
"ExtensionScalarOpsMixin",
|
| 29 |
+
"ArrowStringArray",
|
| 30 |
+
"BaseMaskedArray",
|
| 31 |
+
"BooleanArray",
|
| 32 |
+
"Categorical",
|
| 33 |
+
"DatetimeArray",
|
| 34 |
+
"FloatingArray",
|
| 35 |
+
"IntegerArray",
|
| 36 |
+
"IntervalArray",
|
| 37 |
+
"NumpyExtensionArray",
|
| 38 |
+
"PeriodArray",
|
| 39 |
+
"period_array",
|
| 40 |
+
"SparseArray",
|
| 41 |
+
"StringArray",
|
| 42 |
+
"TimedeltaArray",
|
| 43 |
+
]
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Literal
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from pandas.compat import pa_version_under10p1
|
| 8 |
+
|
| 9 |
+
if not pa_version_under10p1:
|
| 10 |
+
import pyarrow as pa
|
| 11 |
+
import pyarrow.compute as pc
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ArrowStringArrayMixin:
|
| 15 |
+
_pa_array = None
|
| 16 |
+
|
| 17 |
+
def __init__(self, *args, **kwargs) -> None:
|
| 18 |
+
raise NotImplementedError
|
| 19 |
+
|
| 20 |
+
def _str_pad(
|
| 21 |
+
self,
|
| 22 |
+
width: int,
|
| 23 |
+
side: Literal["left", "right", "both"] = "left",
|
| 24 |
+
fillchar: str = " ",
|
| 25 |
+
):
|
| 26 |
+
if side == "left":
|
| 27 |
+
pa_pad = pc.utf8_lpad
|
| 28 |
+
elif side == "right":
|
| 29 |
+
pa_pad = pc.utf8_rpad
|
| 30 |
+
elif side == "both":
|
| 31 |
+
pa_pad = pc.utf8_center
|
| 32 |
+
else:
|
| 33 |
+
raise ValueError(
|
| 34 |
+
f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'"
|
| 35 |
+
)
|
| 36 |
+
return type(self)(pa_pad(self._pa_array, width=width, padding=fillchar))
|
| 37 |
+
|
| 38 |
+
def _str_get(self, i: int):
|
| 39 |
+
lengths = pc.utf8_length(self._pa_array)
|
| 40 |
+
if i >= 0:
|
| 41 |
+
out_of_bounds = pc.greater_equal(i, lengths)
|
| 42 |
+
start = i
|
| 43 |
+
stop = i + 1
|
| 44 |
+
step = 1
|
| 45 |
+
else:
|
| 46 |
+
out_of_bounds = pc.greater(-i, lengths)
|
| 47 |
+
start = i
|
| 48 |
+
stop = i - 1
|
| 49 |
+
step = -1
|
| 50 |
+
not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True))
|
| 51 |
+
selected = pc.utf8_slice_codeunits(
|
| 52 |
+
self._pa_array, start=start, stop=stop, step=step
|
| 53 |
+
)
|
| 54 |
+
null_value = pa.scalar(
|
| 55 |
+
None, type=self._pa_array.type # type: ignore[attr-defined]
|
| 56 |
+
)
|
| 57 |
+
result = pc.if_else(not_out_of_bounds, selected, null_value)
|
| 58 |
+
return type(self)(result)
|
| 59 |
+
|
| 60 |
+
def _str_slice_replace(
|
| 61 |
+
self, start: int | None = None, stop: int | None = None, repl: str | None = None
|
| 62 |
+
):
|
| 63 |
+
if repl is None:
|
| 64 |
+
repl = ""
|
| 65 |
+
if start is None:
|
| 66 |
+
start = 0
|
| 67 |
+
if stop is None:
|
| 68 |
+
stop = np.iinfo(np.int64).max
|
| 69 |
+
return type(self)(pc.utf8_replace_slice(self._pa_array, start, stop, repl))
|
| 70 |
+
|
| 71 |
+
def _str_capitalize(self):
|
| 72 |
+
return type(self)(pc.utf8_capitalize(self._pa_array))
|
| 73 |
+
|
| 74 |
+
def _str_title(self):
|
| 75 |
+
return type(self)(pc.utf8_title(self._pa_array))
|
| 76 |
+
|
| 77 |
+
def _str_swapcase(self):
|
| 78 |
+
return type(self)(pc.utf8_swapcase(self._pa_array))
|
| 79 |
+
|
| 80 |
+
def _str_removesuffix(self, suffix: str):
|
| 81 |
+
ends_with = pc.ends_with(self._pa_array, pattern=suffix)
|
| 82 |
+
removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix))
|
| 83 |
+
result = pc.if_else(ends_with, removed, self._pa_array)
|
| 84 |
+
return type(self)(result)
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py
ADDED
|
@@ -0,0 +1,547 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from functools import wraps
|
| 4 |
+
from typing import (
|
| 5 |
+
TYPE_CHECKING,
|
| 6 |
+
Any,
|
| 7 |
+
Literal,
|
| 8 |
+
cast,
|
| 9 |
+
overload,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from pandas._libs import lib
|
| 15 |
+
from pandas._libs.arrays import NDArrayBacked
|
| 16 |
+
from pandas._libs.tslibs import is_supported_dtype
|
| 17 |
+
from pandas._typing import (
|
| 18 |
+
ArrayLike,
|
| 19 |
+
AxisInt,
|
| 20 |
+
Dtype,
|
| 21 |
+
F,
|
| 22 |
+
FillnaOptions,
|
| 23 |
+
PositionalIndexer2D,
|
| 24 |
+
PositionalIndexerTuple,
|
| 25 |
+
ScalarIndexer,
|
| 26 |
+
Self,
|
| 27 |
+
SequenceIndexer,
|
| 28 |
+
Shape,
|
| 29 |
+
TakeIndexer,
|
| 30 |
+
npt,
|
| 31 |
+
)
|
| 32 |
+
from pandas.errors import AbstractMethodError
|
| 33 |
+
from pandas.util._decorators import doc
|
| 34 |
+
from pandas.util._validators import (
|
| 35 |
+
validate_bool_kwarg,
|
| 36 |
+
validate_fillna_kwargs,
|
| 37 |
+
validate_insert_loc,
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
from pandas.core.dtypes.common import pandas_dtype
|
| 41 |
+
from pandas.core.dtypes.dtypes import (
|
| 42 |
+
DatetimeTZDtype,
|
| 43 |
+
ExtensionDtype,
|
| 44 |
+
PeriodDtype,
|
| 45 |
+
)
|
| 46 |
+
from pandas.core.dtypes.missing import array_equivalent
|
| 47 |
+
|
| 48 |
+
from pandas.core import missing
|
| 49 |
+
from pandas.core.algorithms import (
|
| 50 |
+
take,
|
| 51 |
+
unique,
|
| 52 |
+
value_counts_internal as value_counts,
|
| 53 |
+
)
|
| 54 |
+
from pandas.core.array_algos.quantile import quantile_with_mask
|
| 55 |
+
from pandas.core.array_algos.transforms import shift
|
| 56 |
+
from pandas.core.arrays.base import ExtensionArray
|
| 57 |
+
from pandas.core.construction import extract_array
|
| 58 |
+
from pandas.core.indexers import check_array_indexer
|
| 59 |
+
from pandas.core.sorting import nargminmax
|
| 60 |
+
|
| 61 |
+
if TYPE_CHECKING:
|
| 62 |
+
from collections.abc import Sequence
|
| 63 |
+
|
| 64 |
+
from pandas._typing import (
|
| 65 |
+
NumpySorter,
|
| 66 |
+
NumpyValueArrayLike,
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
from pandas import Series
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def ravel_compat(meth: F) -> F:
|
| 73 |
+
"""
|
| 74 |
+
Decorator to ravel a 2D array before passing it to a cython operation,
|
| 75 |
+
then reshape the result to our own shape.
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
@wraps(meth)
|
| 79 |
+
def method(self, *args, **kwargs):
|
| 80 |
+
if self.ndim == 1:
|
| 81 |
+
return meth(self, *args, **kwargs)
|
| 82 |
+
|
| 83 |
+
flags = self._ndarray.flags
|
| 84 |
+
flat = self.ravel("K")
|
| 85 |
+
result = meth(flat, *args, **kwargs)
|
| 86 |
+
order = "F" if flags.f_contiguous else "C"
|
| 87 |
+
return result.reshape(self.shape, order=order)
|
| 88 |
+
|
| 89 |
+
return cast(F, method)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class NDArrayBackedExtensionArray(NDArrayBacked, ExtensionArray):
|
| 93 |
+
"""
|
| 94 |
+
ExtensionArray that is backed by a single NumPy ndarray.
|
| 95 |
+
"""
|
| 96 |
+
|
| 97 |
+
_ndarray: np.ndarray
|
| 98 |
+
|
| 99 |
+
# scalar used to denote NA value inside our self._ndarray, e.g. -1
|
| 100 |
+
# for Categorical, iNaT for Period. Outside of object dtype,
|
| 101 |
+
# self.isna() should be exactly locations in self._ndarray with
|
| 102 |
+
# _internal_fill_value.
|
| 103 |
+
_internal_fill_value: Any
|
| 104 |
+
|
| 105 |
+
def _box_func(self, x):
|
| 106 |
+
"""
|
| 107 |
+
Wrap numpy type in our dtype.type if necessary.
|
| 108 |
+
"""
|
| 109 |
+
return x
|
| 110 |
+
|
| 111 |
+
def _validate_scalar(self, value):
|
| 112 |
+
# used by NDArrayBackedExtensionIndex.insert
|
| 113 |
+
raise AbstractMethodError(self)
|
| 114 |
+
|
| 115 |
+
# ------------------------------------------------------------------------
|
| 116 |
+
|
| 117 |
+
def view(self, dtype: Dtype | None = None) -> ArrayLike:
|
| 118 |
+
# We handle datetime64, datetime64tz, timedelta64, and period
|
| 119 |
+
# dtypes here. Everything else we pass through to the underlying
|
| 120 |
+
# ndarray.
|
| 121 |
+
if dtype is None or dtype is self.dtype:
|
| 122 |
+
return self._from_backing_data(self._ndarray)
|
| 123 |
+
|
| 124 |
+
if isinstance(dtype, type):
|
| 125 |
+
# we sometimes pass non-dtype objects, e.g np.ndarray;
|
| 126 |
+
# pass those through to the underlying ndarray
|
| 127 |
+
return self._ndarray.view(dtype)
|
| 128 |
+
|
| 129 |
+
dtype = pandas_dtype(dtype)
|
| 130 |
+
arr = self._ndarray
|
| 131 |
+
|
| 132 |
+
if isinstance(dtype, PeriodDtype):
|
| 133 |
+
cls = dtype.construct_array_type()
|
| 134 |
+
return cls(arr.view("i8"), dtype=dtype)
|
| 135 |
+
elif isinstance(dtype, DatetimeTZDtype):
|
| 136 |
+
dt_cls = dtype.construct_array_type()
|
| 137 |
+
dt64_values = arr.view(f"M8[{dtype.unit}]")
|
| 138 |
+
return dt_cls._simple_new(dt64_values, dtype=dtype)
|
| 139 |
+
elif lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
|
| 140 |
+
from pandas.core.arrays import DatetimeArray
|
| 141 |
+
|
| 142 |
+
dt64_values = arr.view(dtype)
|
| 143 |
+
return DatetimeArray._simple_new(dt64_values, dtype=dtype)
|
| 144 |
+
|
| 145 |
+
elif lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
|
| 146 |
+
from pandas.core.arrays import TimedeltaArray
|
| 147 |
+
|
| 148 |
+
td64_values = arr.view(dtype)
|
| 149 |
+
return TimedeltaArray._simple_new(td64_values, dtype=dtype)
|
| 150 |
+
|
| 151 |
+
# error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible
|
| 152 |
+
# type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None,
|
| 153 |
+
# type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
|
| 154 |
+
# Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
|
| 155 |
+
return arr.view(dtype=dtype) # type: ignore[arg-type]
|
| 156 |
+
|
| 157 |
+
def take(
|
| 158 |
+
self,
|
| 159 |
+
indices: TakeIndexer,
|
| 160 |
+
*,
|
| 161 |
+
allow_fill: bool = False,
|
| 162 |
+
fill_value: Any = None,
|
| 163 |
+
axis: AxisInt = 0,
|
| 164 |
+
) -> Self:
|
| 165 |
+
if allow_fill:
|
| 166 |
+
fill_value = self._validate_scalar(fill_value)
|
| 167 |
+
|
| 168 |
+
new_data = take(
|
| 169 |
+
self._ndarray,
|
| 170 |
+
indices,
|
| 171 |
+
allow_fill=allow_fill,
|
| 172 |
+
fill_value=fill_value,
|
| 173 |
+
axis=axis,
|
| 174 |
+
)
|
| 175 |
+
return self._from_backing_data(new_data)
|
| 176 |
+
|
| 177 |
+
# ------------------------------------------------------------------------
|
| 178 |
+
|
| 179 |
+
def equals(self, other) -> bool:
|
| 180 |
+
if type(self) is not type(other):
|
| 181 |
+
return False
|
| 182 |
+
if self.dtype != other.dtype:
|
| 183 |
+
return False
|
| 184 |
+
return bool(array_equivalent(self._ndarray, other._ndarray, dtype_equal=True))
|
| 185 |
+
|
| 186 |
+
@classmethod
|
| 187 |
+
def _from_factorized(cls, values, original):
|
| 188 |
+
assert values.dtype == original._ndarray.dtype
|
| 189 |
+
return original._from_backing_data(values)
|
| 190 |
+
|
| 191 |
+
def _values_for_argsort(self) -> np.ndarray:
|
| 192 |
+
return self._ndarray
|
| 193 |
+
|
| 194 |
+
def _values_for_factorize(self):
|
| 195 |
+
return self._ndarray, self._internal_fill_value
|
| 196 |
+
|
| 197 |
+
def _hash_pandas_object(
|
| 198 |
+
self, *, encoding: str, hash_key: str, categorize: bool
|
| 199 |
+
) -> npt.NDArray[np.uint64]:
|
| 200 |
+
from pandas.core.util.hashing import hash_array
|
| 201 |
+
|
| 202 |
+
values = self._ndarray
|
| 203 |
+
return hash_array(
|
| 204 |
+
values, encoding=encoding, hash_key=hash_key, categorize=categorize
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
# Signature of "argmin" incompatible with supertype "ExtensionArray"
|
| 208 |
+
def argmin(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override]
|
| 209 |
+
# override base class by adding axis keyword
|
| 210 |
+
validate_bool_kwarg(skipna, "skipna")
|
| 211 |
+
if not skipna and self._hasna:
|
| 212 |
+
raise NotImplementedError
|
| 213 |
+
return nargminmax(self, "argmin", axis=axis)
|
| 214 |
+
|
| 215 |
+
# Signature of "argmax" incompatible with supertype "ExtensionArray"
|
| 216 |
+
def argmax(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override]
|
| 217 |
+
# override base class by adding axis keyword
|
| 218 |
+
validate_bool_kwarg(skipna, "skipna")
|
| 219 |
+
if not skipna and self._hasna:
|
| 220 |
+
raise NotImplementedError
|
| 221 |
+
return nargminmax(self, "argmax", axis=axis)
|
| 222 |
+
|
| 223 |
+
def unique(self) -> Self:
|
| 224 |
+
new_data = unique(self._ndarray)
|
| 225 |
+
return self._from_backing_data(new_data)
|
| 226 |
+
|
| 227 |
+
@classmethod
|
| 228 |
+
@doc(ExtensionArray._concat_same_type)
|
| 229 |
+
def _concat_same_type(
|
| 230 |
+
cls,
|
| 231 |
+
to_concat: Sequence[Self],
|
| 232 |
+
axis: AxisInt = 0,
|
| 233 |
+
) -> Self:
|
| 234 |
+
if not lib.dtypes_all_equal([x.dtype for x in to_concat]):
|
| 235 |
+
dtypes = {str(x.dtype) for x in to_concat}
|
| 236 |
+
raise ValueError("to_concat must have the same dtype", dtypes)
|
| 237 |
+
|
| 238 |
+
return super()._concat_same_type(to_concat, axis=axis)
|
| 239 |
+
|
| 240 |
+
@doc(ExtensionArray.searchsorted)
|
| 241 |
+
def searchsorted(
|
| 242 |
+
self,
|
| 243 |
+
value: NumpyValueArrayLike | ExtensionArray,
|
| 244 |
+
side: Literal["left", "right"] = "left",
|
| 245 |
+
sorter: NumpySorter | None = None,
|
| 246 |
+
) -> npt.NDArray[np.intp] | np.intp:
|
| 247 |
+
npvalue = self._validate_setitem_value(value)
|
| 248 |
+
return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter)
|
| 249 |
+
|
| 250 |
+
@doc(ExtensionArray.shift)
|
| 251 |
+
def shift(self, periods: int = 1, fill_value=None):
|
| 252 |
+
# NB: shift is always along axis=0
|
| 253 |
+
axis = 0
|
| 254 |
+
fill_value = self._validate_scalar(fill_value)
|
| 255 |
+
new_values = shift(self._ndarray, periods, axis, fill_value)
|
| 256 |
+
|
| 257 |
+
return self._from_backing_data(new_values)
|
| 258 |
+
|
| 259 |
+
def __setitem__(self, key, value) -> None:
|
| 260 |
+
key = check_array_indexer(self, key)
|
| 261 |
+
value = self._validate_setitem_value(value)
|
| 262 |
+
self._ndarray[key] = value
|
| 263 |
+
|
| 264 |
+
def _validate_setitem_value(self, value):
|
| 265 |
+
return value
|
| 266 |
+
|
| 267 |
+
@overload
|
| 268 |
+
def __getitem__(self, key: ScalarIndexer) -> Any:
|
| 269 |
+
...
|
| 270 |
+
|
| 271 |
+
@overload
|
| 272 |
+
def __getitem__(
|
| 273 |
+
self,
|
| 274 |
+
key: SequenceIndexer | PositionalIndexerTuple,
|
| 275 |
+
) -> Self:
|
| 276 |
+
...
|
| 277 |
+
|
| 278 |
+
def __getitem__(
|
| 279 |
+
self,
|
| 280 |
+
key: PositionalIndexer2D,
|
| 281 |
+
) -> Self | Any:
|
| 282 |
+
if lib.is_integer(key):
|
| 283 |
+
# fast-path
|
| 284 |
+
result = self._ndarray[key]
|
| 285 |
+
if self.ndim == 1:
|
| 286 |
+
return self._box_func(result)
|
| 287 |
+
return self._from_backing_data(result)
|
| 288 |
+
|
| 289 |
+
# error: Incompatible types in assignment (expression has type "ExtensionArray",
|
| 290 |
+
# variable has type "Union[int, slice, ndarray]")
|
| 291 |
+
key = extract_array(key, extract_numpy=True) # type: ignore[assignment]
|
| 292 |
+
key = check_array_indexer(self, key)
|
| 293 |
+
result = self._ndarray[key]
|
| 294 |
+
if lib.is_scalar(result):
|
| 295 |
+
return self._box_func(result)
|
| 296 |
+
|
| 297 |
+
result = self._from_backing_data(result)
|
| 298 |
+
return result
|
| 299 |
+
|
| 300 |
+
def _fill_mask_inplace(
|
| 301 |
+
self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
|
| 302 |
+
) -> None:
|
| 303 |
+
# (for now) when self.ndim == 2, we assume axis=0
|
| 304 |
+
func = missing.get_fill_func(method, ndim=self.ndim)
|
| 305 |
+
func(self._ndarray.T, limit=limit, mask=mask.T)
|
| 306 |
+
|
| 307 |
+
def _pad_or_backfill(
|
| 308 |
+
self,
|
| 309 |
+
*,
|
| 310 |
+
method: FillnaOptions,
|
| 311 |
+
limit: int | None = None,
|
| 312 |
+
limit_area: Literal["inside", "outside"] | None = None,
|
| 313 |
+
copy: bool = True,
|
| 314 |
+
) -> Self:
|
| 315 |
+
mask = self.isna()
|
| 316 |
+
if mask.any():
|
| 317 |
+
# (for now) when self.ndim == 2, we assume axis=0
|
| 318 |
+
func = missing.get_fill_func(method, ndim=self.ndim)
|
| 319 |
+
|
| 320 |
+
npvalues = self._ndarray.T
|
| 321 |
+
if copy:
|
| 322 |
+
npvalues = npvalues.copy()
|
| 323 |
+
func(npvalues, limit=limit, limit_area=limit_area, mask=mask.T)
|
| 324 |
+
npvalues = npvalues.T
|
| 325 |
+
|
| 326 |
+
if copy:
|
| 327 |
+
new_values = self._from_backing_data(npvalues)
|
| 328 |
+
else:
|
| 329 |
+
new_values = self
|
| 330 |
+
|
| 331 |
+
else:
|
| 332 |
+
if copy:
|
| 333 |
+
new_values = self.copy()
|
| 334 |
+
else:
|
| 335 |
+
new_values = self
|
| 336 |
+
return new_values
|
| 337 |
+
|
| 338 |
+
@doc(ExtensionArray.fillna)
|
| 339 |
+
def fillna(
|
| 340 |
+
self, value=None, method=None, limit: int | None = None, copy: bool = True
|
| 341 |
+
) -> Self:
|
| 342 |
+
value, method = validate_fillna_kwargs(
|
| 343 |
+
value, method, validate_scalar_dict_value=False
|
| 344 |
+
)
|
| 345 |
+
|
| 346 |
+
mask = self.isna()
|
| 347 |
+
# error: Argument 2 to "check_value_size" has incompatible type
|
| 348 |
+
# "ExtensionArray"; expected "ndarray"
|
| 349 |
+
value = missing.check_value_size(
|
| 350 |
+
value, mask, len(self) # type: ignore[arg-type]
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
if mask.any():
|
| 354 |
+
if method is not None:
|
| 355 |
+
# (for now) when self.ndim == 2, we assume axis=0
|
| 356 |
+
func = missing.get_fill_func(method, ndim=self.ndim)
|
| 357 |
+
npvalues = self._ndarray.T
|
| 358 |
+
if copy:
|
| 359 |
+
npvalues = npvalues.copy()
|
| 360 |
+
func(npvalues, limit=limit, mask=mask.T)
|
| 361 |
+
npvalues = npvalues.T
|
| 362 |
+
|
| 363 |
+
# TODO: NumpyExtensionArray didn't used to copy, need tests
|
| 364 |
+
# for this
|
| 365 |
+
new_values = self._from_backing_data(npvalues)
|
| 366 |
+
else:
|
| 367 |
+
# fill with value
|
| 368 |
+
if copy:
|
| 369 |
+
new_values = self.copy()
|
| 370 |
+
else:
|
| 371 |
+
new_values = self[:]
|
| 372 |
+
new_values[mask] = value
|
| 373 |
+
else:
|
| 374 |
+
# We validate the fill_value even if there is nothing to fill
|
| 375 |
+
if value is not None:
|
| 376 |
+
self._validate_setitem_value(value)
|
| 377 |
+
|
| 378 |
+
if not copy:
|
| 379 |
+
new_values = self[:]
|
| 380 |
+
else:
|
| 381 |
+
new_values = self.copy()
|
| 382 |
+
return new_values
|
| 383 |
+
|
| 384 |
+
# ------------------------------------------------------------------------
|
| 385 |
+
# Reductions
|
| 386 |
+
|
| 387 |
+
def _wrap_reduction_result(self, axis: AxisInt | None, result):
|
| 388 |
+
if axis is None or self.ndim == 1:
|
| 389 |
+
return self._box_func(result)
|
| 390 |
+
return self._from_backing_data(result)
|
| 391 |
+
|
| 392 |
+
# ------------------------------------------------------------------------
|
| 393 |
+
# __array_function__ methods
|
| 394 |
+
|
| 395 |
+
def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
|
| 396 |
+
"""
|
| 397 |
+
Analogue to np.putmask(self, mask, value)
|
| 398 |
+
|
| 399 |
+
Parameters
|
| 400 |
+
----------
|
| 401 |
+
mask : np.ndarray[bool]
|
| 402 |
+
value : scalar or listlike
|
| 403 |
+
|
| 404 |
+
Raises
|
| 405 |
+
------
|
| 406 |
+
TypeError
|
| 407 |
+
If value cannot be cast to self.dtype.
|
| 408 |
+
"""
|
| 409 |
+
value = self._validate_setitem_value(value)
|
| 410 |
+
|
| 411 |
+
np.putmask(self._ndarray, mask, value)
|
| 412 |
+
|
| 413 |
+
def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self:
|
| 414 |
+
"""
|
| 415 |
+
Analogue to np.where(mask, self, value)
|
| 416 |
+
|
| 417 |
+
Parameters
|
| 418 |
+
----------
|
| 419 |
+
mask : np.ndarray[bool]
|
| 420 |
+
value : scalar or listlike
|
| 421 |
+
|
| 422 |
+
Raises
|
| 423 |
+
------
|
| 424 |
+
TypeError
|
| 425 |
+
If value cannot be cast to self.dtype.
|
| 426 |
+
"""
|
| 427 |
+
value = self._validate_setitem_value(value)
|
| 428 |
+
|
| 429 |
+
res_values = np.where(mask, self._ndarray, value)
|
| 430 |
+
if res_values.dtype != self._ndarray.dtype:
|
| 431 |
+
raise AssertionError(
|
| 432 |
+
# GH#56410
|
| 433 |
+
"Something has gone wrong, please report a bug at "
|
| 434 |
+
"github.com/pandas-dev/pandas/"
|
| 435 |
+
)
|
| 436 |
+
return self._from_backing_data(res_values)
|
| 437 |
+
|
| 438 |
+
# ------------------------------------------------------------------------
|
| 439 |
+
# Index compat methods
|
| 440 |
+
|
| 441 |
+
def insert(self, loc: int, item) -> Self:
|
| 442 |
+
"""
|
| 443 |
+
Make new ExtensionArray inserting new item at location. Follows
|
| 444 |
+
Python list.append semantics for negative values.
|
| 445 |
+
|
| 446 |
+
Parameters
|
| 447 |
+
----------
|
| 448 |
+
loc : int
|
| 449 |
+
item : object
|
| 450 |
+
|
| 451 |
+
Returns
|
| 452 |
+
-------
|
| 453 |
+
type(self)
|
| 454 |
+
"""
|
| 455 |
+
loc = validate_insert_loc(loc, len(self))
|
| 456 |
+
|
| 457 |
+
code = self._validate_scalar(item)
|
| 458 |
+
|
| 459 |
+
new_vals = np.concatenate(
|
| 460 |
+
(
|
| 461 |
+
self._ndarray[:loc],
|
| 462 |
+
np.asarray([code], dtype=self._ndarray.dtype),
|
| 463 |
+
self._ndarray[loc:],
|
| 464 |
+
)
|
| 465 |
+
)
|
| 466 |
+
return self._from_backing_data(new_vals)
|
| 467 |
+
|
| 468 |
+
# ------------------------------------------------------------------------
|
| 469 |
+
# Additional array methods
|
| 470 |
+
# These are not part of the EA API, but we implement them because
|
| 471 |
+
# pandas assumes they're there.
|
| 472 |
+
|
| 473 |
+
def value_counts(self, dropna: bool = True) -> Series:
|
| 474 |
+
"""
|
| 475 |
+
Return a Series containing counts of unique values.
|
| 476 |
+
|
| 477 |
+
Parameters
|
| 478 |
+
----------
|
| 479 |
+
dropna : bool, default True
|
| 480 |
+
Don't include counts of NA values.
|
| 481 |
+
|
| 482 |
+
Returns
|
| 483 |
+
-------
|
| 484 |
+
Series
|
| 485 |
+
"""
|
| 486 |
+
if self.ndim != 1:
|
| 487 |
+
raise NotImplementedError
|
| 488 |
+
|
| 489 |
+
from pandas import (
|
| 490 |
+
Index,
|
| 491 |
+
Series,
|
| 492 |
+
)
|
| 493 |
+
|
| 494 |
+
if dropna:
|
| 495 |
+
# error: Unsupported operand type for ~ ("ExtensionArray")
|
| 496 |
+
values = self[~self.isna()]._ndarray # type: ignore[operator]
|
| 497 |
+
else:
|
| 498 |
+
values = self._ndarray
|
| 499 |
+
|
| 500 |
+
result = value_counts(values, sort=False, dropna=dropna)
|
| 501 |
+
|
| 502 |
+
index_arr = self._from_backing_data(np.asarray(result.index._data))
|
| 503 |
+
index = Index(index_arr, name=result.index.name)
|
| 504 |
+
return Series(result._values, index=index, name=result.name, copy=False)
|
| 505 |
+
|
| 506 |
+
def _quantile(
|
| 507 |
+
self,
|
| 508 |
+
qs: npt.NDArray[np.float64],
|
| 509 |
+
interpolation: str,
|
| 510 |
+
) -> Self:
|
| 511 |
+
# TODO: disable for Categorical if not ordered?
|
| 512 |
+
|
| 513 |
+
mask = np.asarray(self.isna())
|
| 514 |
+
arr = self._ndarray
|
| 515 |
+
fill_value = self._internal_fill_value
|
| 516 |
+
|
| 517 |
+
res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
|
| 518 |
+
|
| 519 |
+
res_values = self._cast_quantile_result(res_values)
|
| 520 |
+
return self._from_backing_data(res_values)
|
| 521 |
+
|
| 522 |
+
# TODO: see if we can share this with other dispatch-wrapping methods
|
| 523 |
+
def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray:
|
| 524 |
+
"""
|
| 525 |
+
Cast the result of quantile_with_mask to an appropriate dtype
|
| 526 |
+
to pass to _from_backing_data in _quantile.
|
| 527 |
+
"""
|
| 528 |
+
return res_values
|
| 529 |
+
|
| 530 |
+
# ------------------------------------------------------------------------
|
| 531 |
+
# numpy-like methods
|
| 532 |
+
|
| 533 |
+
@classmethod
|
| 534 |
+
def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self:
|
| 535 |
+
"""
|
| 536 |
+
Analogous to np.empty(shape, dtype=dtype)
|
| 537 |
+
|
| 538 |
+
Parameters
|
| 539 |
+
----------
|
| 540 |
+
shape : tuple[int]
|
| 541 |
+
dtype : ExtensionDtype
|
| 542 |
+
"""
|
| 543 |
+
# The base implementation uses a naive approach to find the dtype
|
| 544 |
+
# for the backing ndarray
|
| 545 |
+
arr = cls._from_sequence([], dtype=dtype)
|
| 546 |
+
backing = np.empty(shape, dtype=arr._ndarray.dtype)
|
| 547 |
+
return arr._from_backing_data(backing)
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Helper functions to generate range-like data for DatetimeArray
|
| 3 |
+
(and possibly TimedeltaArray/PeriodArray)
|
| 4 |
+
"""
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
from typing import TYPE_CHECKING
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from pandas._libs.lib import i8max
|
| 12 |
+
from pandas._libs.tslibs import (
|
| 13 |
+
BaseOffset,
|
| 14 |
+
OutOfBoundsDatetime,
|
| 15 |
+
Timedelta,
|
| 16 |
+
Timestamp,
|
| 17 |
+
iNaT,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
from pandas._typing import npt
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def generate_regular_range(
|
| 25 |
+
start: Timestamp | Timedelta | None,
|
| 26 |
+
end: Timestamp | Timedelta | None,
|
| 27 |
+
periods: int | None,
|
| 28 |
+
freq: BaseOffset,
|
| 29 |
+
unit: str = "ns",
|
| 30 |
+
) -> npt.NDArray[np.intp]:
|
| 31 |
+
"""
|
| 32 |
+
Generate a range of dates or timestamps with the spans between dates
|
| 33 |
+
described by the given `freq` DateOffset.
|
| 34 |
+
|
| 35 |
+
Parameters
|
| 36 |
+
----------
|
| 37 |
+
start : Timedelta, Timestamp or None
|
| 38 |
+
First point of produced date range.
|
| 39 |
+
end : Timedelta, Timestamp or None
|
| 40 |
+
Last point of produced date range.
|
| 41 |
+
periods : int or None
|
| 42 |
+
Number of periods in produced date range.
|
| 43 |
+
freq : Tick
|
| 44 |
+
Describes space between dates in produced date range.
|
| 45 |
+
unit : str, default "ns"
|
| 46 |
+
The resolution the output is meant to represent.
|
| 47 |
+
|
| 48 |
+
Returns
|
| 49 |
+
-------
|
| 50 |
+
ndarray[np.int64]
|
| 51 |
+
Representing the given resolution.
|
| 52 |
+
"""
|
| 53 |
+
istart = start._value if start is not None else None
|
| 54 |
+
iend = end._value if end is not None else None
|
| 55 |
+
freq.nanos # raises if non-fixed frequency
|
| 56 |
+
td = Timedelta(freq)
|
| 57 |
+
b: int
|
| 58 |
+
e: int
|
| 59 |
+
try:
|
| 60 |
+
td = td.as_unit(unit, round_ok=False)
|
| 61 |
+
except ValueError as err:
|
| 62 |
+
raise ValueError(
|
| 63 |
+
f"freq={freq} is incompatible with unit={unit}. "
|
| 64 |
+
"Use a lower freq or a higher unit instead."
|
| 65 |
+
) from err
|
| 66 |
+
stride = int(td._value)
|
| 67 |
+
|
| 68 |
+
if periods is None and istart is not None and iend is not None:
|
| 69 |
+
b = istart
|
| 70 |
+
# cannot just use e = Timestamp(end) + 1 because arange breaks when
|
| 71 |
+
# stride is too large, see GH10887
|
| 72 |
+
e = b + (iend - b) // stride * stride + stride // 2 + 1
|
| 73 |
+
elif istart is not None and periods is not None:
|
| 74 |
+
b = istart
|
| 75 |
+
e = _generate_range_overflow_safe(b, periods, stride, side="start")
|
| 76 |
+
elif iend is not None and periods is not None:
|
| 77 |
+
e = iend + stride
|
| 78 |
+
b = _generate_range_overflow_safe(e, periods, stride, side="end")
|
| 79 |
+
else:
|
| 80 |
+
raise ValueError(
|
| 81 |
+
"at least 'start' or 'end' should be specified if a 'period' is given."
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
with np.errstate(over="raise"):
|
| 85 |
+
# If the range is sufficiently large, np.arange may overflow
|
| 86 |
+
# and incorrectly return an empty array if not caught.
|
| 87 |
+
try:
|
| 88 |
+
values = np.arange(b, e, stride, dtype=np.int64)
|
| 89 |
+
except FloatingPointError:
|
| 90 |
+
xdr = [b]
|
| 91 |
+
while xdr[-1] != e:
|
| 92 |
+
xdr.append(xdr[-1] + stride)
|
| 93 |
+
values = np.array(xdr[:-1], dtype=np.int64)
|
| 94 |
+
return values
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _generate_range_overflow_safe(
|
| 98 |
+
endpoint: int, periods: int, stride: int, side: str = "start"
|
| 99 |
+
) -> int:
|
| 100 |
+
"""
|
| 101 |
+
Calculate the second endpoint for passing to np.arange, checking
|
| 102 |
+
to avoid an integer overflow. Catch OverflowError and re-raise
|
| 103 |
+
as OutOfBoundsDatetime.
|
| 104 |
+
|
| 105 |
+
Parameters
|
| 106 |
+
----------
|
| 107 |
+
endpoint : int
|
| 108 |
+
nanosecond timestamp of the known endpoint of the desired range
|
| 109 |
+
periods : int
|
| 110 |
+
number of periods in the desired range
|
| 111 |
+
stride : int
|
| 112 |
+
nanoseconds between periods in the desired range
|
| 113 |
+
side : {'start', 'end'}
|
| 114 |
+
which end of the range `endpoint` refers to
|
| 115 |
+
|
| 116 |
+
Returns
|
| 117 |
+
-------
|
| 118 |
+
other_end : int
|
| 119 |
+
|
| 120 |
+
Raises
|
| 121 |
+
------
|
| 122 |
+
OutOfBoundsDatetime
|
| 123 |
+
"""
|
| 124 |
+
# GH#14187 raise instead of incorrectly wrapping around
|
| 125 |
+
assert side in ["start", "end"]
|
| 126 |
+
|
| 127 |
+
i64max = np.uint64(i8max)
|
| 128 |
+
msg = f"Cannot generate range with {side}={endpoint} and periods={periods}"
|
| 129 |
+
|
| 130 |
+
with np.errstate(over="raise"):
|
| 131 |
+
# if periods * strides cannot be multiplied within the *uint64* bounds,
|
| 132 |
+
# we cannot salvage the operation by recursing, so raise
|
| 133 |
+
try:
|
| 134 |
+
addend = np.uint64(periods) * np.uint64(np.abs(stride))
|
| 135 |
+
except FloatingPointError as err:
|
| 136 |
+
raise OutOfBoundsDatetime(msg) from err
|
| 137 |
+
|
| 138 |
+
if np.abs(addend) <= i64max:
|
| 139 |
+
# relatively easy case without casting concerns
|
| 140 |
+
return _generate_range_overflow_safe_signed(endpoint, periods, stride, side)
|
| 141 |
+
|
| 142 |
+
elif (endpoint > 0 and side == "start" and stride > 0) or (
|
| 143 |
+
endpoint < 0 < stride and side == "end"
|
| 144 |
+
):
|
| 145 |
+
# no chance of not-overflowing
|
| 146 |
+
raise OutOfBoundsDatetime(msg)
|
| 147 |
+
|
| 148 |
+
elif side == "end" and endpoint - stride <= i64max < endpoint:
|
| 149 |
+
# in _generate_regular_range we added `stride` thereby overflowing
|
| 150 |
+
# the bounds. Adjust to fix this.
|
| 151 |
+
return _generate_range_overflow_safe(
|
| 152 |
+
endpoint - stride, periods - 1, stride, side
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
# split into smaller pieces
|
| 156 |
+
mid_periods = periods // 2
|
| 157 |
+
remaining = periods - mid_periods
|
| 158 |
+
assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
|
| 159 |
+
|
| 160 |
+
midpoint = int(_generate_range_overflow_safe(endpoint, mid_periods, stride, side))
|
| 161 |
+
return _generate_range_overflow_safe(midpoint, remaining, stride, side)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _generate_range_overflow_safe_signed(
|
| 165 |
+
endpoint: int, periods: int, stride: int, side: str
|
| 166 |
+
) -> int:
|
| 167 |
+
"""
|
| 168 |
+
A special case for _generate_range_overflow_safe where `periods * stride`
|
| 169 |
+
can be calculated without overflowing int64 bounds.
|
| 170 |
+
"""
|
| 171 |
+
assert side in ["start", "end"]
|
| 172 |
+
if side == "end":
|
| 173 |
+
stride *= -1
|
| 174 |
+
|
| 175 |
+
with np.errstate(over="raise"):
|
| 176 |
+
addend = np.int64(periods) * np.int64(stride)
|
| 177 |
+
try:
|
| 178 |
+
# easy case with no overflows
|
| 179 |
+
result = np.int64(endpoint) + addend
|
| 180 |
+
if result == iNaT:
|
| 181 |
+
# Putting this into a DatetimeArray/TimedeltaArray
|
| 182 |
+
# would incorrectly be interpreted as NaT
|
| 183 |
+
raise OverflowError
|
| 184 |
+
return int(result)
|
| 185 |
+
except (FloatingPointError, OverflowError):
|
| 186 |
+
# with endpoint negative and addend positive we risk
|
| 187 |
+
# FloatingPointError; with reversed signed we risk OverflowError
|
| 188 |
+
pass
|
| 189 |
+
|
| 190 |
+
# if stride and endpoint had opposite signs, then endpoint + addend
|
| 191 |
+
# should never overflow. so they must have the same signs
|
| 192 |
+
assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0)
|
| 193 |
+
|
| 194 |
+
if stride > 0:
|
| 195 |
+
# watch out for very special case in which we just slightly
|
| 196 |
+
# exceed implementation bounds, but when passing the result to
|
| 197 |
+
# np.arange will get a result slightly within the bounds
|
| 198 |
+
|
| 199 |
+
uresult = np.uint64(endpoint) + np.uint64(addend)
|
| 200 |
+
i64max = np.uint64(i8max)
|
| 201 |
+
assert uresult > i64max
|
| 202 |
+
if uresult <= i64max + np.uint64(stride):
|
| 203 |
+
return int(uresult)
|
| 204 |
+
|
| 205 |
+
raise OutOfBoundsDatetime(
|
| 206 |
+
f"Cannot generate range with {side}={endpoint} and periods={periods}"
|
| 207 |
+
)
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/_utils.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import (
|
| 4 |
+
TYPE_CHECKING,
|
| 5 |
+
Any,
|
| 6 |
+
)
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from pandas._libs import lib
|
| 11 |
+
from pandas.errors import LossySetitemError
|
| 12 |
+
|
| 13 |
+
from pandas.core.dtypes.cast import np_can_hold_element
|
| 14 |
+
from pandas.core.dtypes.common import is_numeric_dtype
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from pandas._typing import (
|
| 18 |
+
ArrayLike,
|
| 19 |
+
npt,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def to_numpy_dtype_inference(
|
| 24 |
+
arr: ArrayLike, dtype: npt.DTypeLike | None, na_value, hasna: bool
|
| 25 |
+
) -> tuple[npt.DTypeLike, Any]:
|
| 26 |
+
if dtype is None and is_numeric_dtype(arr.dtype):
|
| 27 |
+
dtype_given = False
|
| 28 |
+
if hasna:
|
| 29 |
+
if arr.dtype.kind == "b":
|
| 30 |
+
dtype = np.dtype(np.object_)
|
| 31 |
+
else:
|
| 32 |
+
if arr.dtype.kind in "iu":
|
| 33 |
+
dtype = np.dtype(np.float64)
|
| 34 |
+
else:
|
| 35 |
+
dtype = arr.dtype.numpy_dtype # type: ignore[union-attr]
|
| 36 |
+
if na_value is lib.no_default:
|
| 37 |
+
na_value = np.nan
|
| 38 |
+
else:
|
| 39 |
+
dtype = arr.dtype.numpy_dtype # type: ignore[union-attr]
|
| 40 |
+
elif dtype is not None:
|
| 41 |
+
dtype = np.dtype(dtype)
|
| 42 |
+
dtype_given = True
|
| 43 |
+
else:
|
| 44 |
+
dtype_given = True
|
| 45 |
+
|
| 46 |
+
if na_value is lib.no_default:
|
| 47 |
+
if dtype is None or not hasna:
|
| 48 |
+
na_value = arr.dtype.na_value
|
| 49 |
+
elif dtype.kind == "f": # type: ignore[union-attr]
|
| 50 |
+
na_value = np.nan
|
| 51 |
+
elif dtype.kind == "M": # type: ignore[union-attr]
|
| 52 |
+
na_value = np.datetime64("nat")
|
| 53 |
+
elif dtype.kind == "m": # type: ignore[union-attr]
|
| 54 |
+
na_value = np.timedelta64("nat")
|
| 55 |
+
else:
|
| 56 |
+
na_value = arr.dtype.na_value
|
| 57 |
+
|
| 58 |
+
if not dtype_given and hasna:
|
| 59 |
+
try:
|
| 60 |
+
np_can_hold_element(dtype, na_value) # type: ignore[arg-type]
|
| 61 |
+
except LossySetitemError:
|
| 62 |
+
dtype = np.dtype(np.object_)
|
| 63 |
+
return dtype, na_value
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py
ADDED
|
@@ -0,0 +1,2556 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from datetime import (
|
| 4 |
+
datetime,
|
| 5 |
+
timedelta,
|
| 6 |
+
)
|
| 7 |
+
from functools import wraps
|
| 8 |
+
import operator
|
| 9 |
+
from typing import (
|
| 10 |
+
TYPE_CHECKING,
|
| 11 |
+
Any,
|
| 12 |
+
Callable,
|
| 13 |
+
Literal,
|
| 14 |
+
Union,
|
| 15 |
+
cast,
|
| 16 |
+
final,
|
| 17 |
+
overload,
|
| 18 |
+
)
|
| 19 |
+
import warnings
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
from pandas._libs import (
|
| 24 |
+
algos,
|
| 25 |
+
lib,
|
| 26 |
+
)
|
| 27 |
+
from pandas._libs.arrays import NDArrayBacked
|
| 28 |
+
from pandas._libs.tslibs import (
|
| 29 |
+
BaseOffset,
|
| 30 |
+
IncompatibleFrequency,
|
| 31 |
+
NaT,
|
| 32 |
+
NaTType,
|
| 33 |
+
Period,
|
| 34 |
+
Resolution,
|
| 35 |
+
Tick,
|
| 36 |
+
Timedelta,
|
| 37 |
+
Timestamp,
|
| 38 |
+
add_overflowsafe,
|
| 39 |
+
astype_overflowsafe,
|
| 40 |
+
get_unit_from_dtype,
|
| 41 |
+
iNaT,
|
| 42 |
+
ints_to_pydatetime,
|
| 43 |
+
ints_to_pytimedelta,
|
| 44 |
+
periods_per_day,
|
| 45 |
+
to_offset,
|
| 46 |
+
)
|
| 47 |
+
from pandas._libs.tslibs.fields import (
|
| 48 |
+
RoundTo,
|
| 49 |
+
round_nsint64,
|
| 50 |
+
)
|
| 51 |
+
from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
|
| 52 |
+
from pandas._libs.tslibs.timedeltas import get_unit_for_round
|
| 53 |
+
from pandas._libs.tslibs.timestamps import integer_op_not_supported
|
| 54 |
+
from pandas._typing import (
|
| 55 |
+
ArrayLike,
|
| 56 |
+
AxisInt,
|
| 57 |
+
DatetimeLikeScalar,
|
| 58 |
+
Dtype,
|
| 59 |
+
DtypeObj,
|
| 60 |
+
F,
|
| 61 |
+
InterpolateOptions,
|
| 62 |
+
NpDtype,
|
| 63 |
+
PositionalIndexer2D,
|
| 64 |
+
PositionalIndexerTuple,
|
| 65 |
+
ScalarIndexer,
|
| 66 |
+
Self,
|
| 67 |
+
SequenceIndexer,
|
| 68 |
+
TimeAmbiguous,
|
| 69 |
+
TimeNonexistent,
|
| 70 |
+
npt,
|
| 71 |
+
)
|
| 72 |
+
from pandas.compat.numpy import function as nv
|
| 73 |
+
from pandas.errors import (
|
| 74 |
+
AbstractMethodError,
|
| 75 |
+
InvalidComparison,
|
| 76 |
+
PerformanceWarning,
|
| 77 |
+
)
|
| 78 |
+
from pandas.util._decorators import (
|
| 79 |
+
Appender,
|
| 80 |
+
Substitution,
|
| 81 |
+
cache_readonly,
|
| 82 |
+
)
|
| 83 |
+
from pandas.util._exceptions import find_stack_level
|
| 84 |
+
|
| 85 |
+
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
|
| 86 |
+
from pandas.core.dtypes.common import (
|
| 87 |
+
is_all_strings,
|
| 88 |
+
is_integer_dtype,
|
| 89 |
+
is_list_like,
|
| 90 |
+
is_object_dtype,
|
| 91 |
+
is_string_dtype,
|
| 92 |
+
pandas_dtype,
|
| 93 |
+
)
|
| 94 |
+
from pandas.core.dtypes.dtypes import (
|
| 95 |
+
ArrowDtype,
|
| 96 |
+
CategoricalDtype,
|
| 97 |
+
DatetimeTZDtype,
|
| 98 |
+
ExtensionDtype,
|
| 99 |
+
PeriodDtype,
|
| 100 |
+
)
|
| 101 |
+
from pandas.core.dtypes.generic import (
|
| 102 |
+
ABCCategorical,
|
| 103 |
+
ABCMultiIndex,
|
| 104 |
+
)
|
| 105 |
+
from pandas.core.dtypes.missing import (
|
| 106 |
+
is_valid_na_for_dtype,
|
| 107 |
+
isna,
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
from pandas.core import (
|
| 111 |
+
algorithms,
|
| 112 |
+
missing,
|
| 113 |
+
nanops,
|
| 114 |
+
ops,
|
| 115 |
+
)
|
| 116 |
+
from pandas.core.algorithms import (
|
| 117 |
+
isin,
|
| 118 |
+
map_array,
|
| 119 |
+
unique1d,
|
| 120 |
+
)
|
| 121 |
+
from pandas.core.array_algos import datetimelike_accumulations
|
| 122 |
+
from pandas.core.arraylike import OpsMixin
|
| 123 |
+
from pandas.core.arrays._mixins import (
|
| 124 |
+
NDArrayBackedExtensionArray,
|
| 125 |
+
ravel_compat,
|
| 126 |
+
)
|
| 127 |
+
from pandas.core.arrays.arrow.array import ArrowExtensionArray
|
| 128 |
+
from pandas.core.arrays.base import ExtensionArray
|
| 129 |
+
from pandas.core.arrays.integer import IntegerArray
|
| 130 |
+
import pandas.core.common as com
|
| 131 |
+
from pandas.core.construction import (
|
| 132 |
+
array as pd_array,
|
| 133 |
+
ensure_wrapped_if_datetimelike,
|
| 134 |
+
extract_array,
|
| 135 |
+
)
|
| 136 |
+
from pandas.core.indexers import (
|
| 137 |
+
check_array_indexer,
|
| 138 |
+
check_setitem_lengths,
|
| 139 |
+
)
|
| 140 |
+
from pandas.core.ops.common import unpack_zerodim_and_defer
|
| 141 |
+
from pandas.core.ops.invalid import (
|
| 142 |
+
invalid_comparison,
|
| 143 |
+
make_invalid_op,
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
from pandas.tseries import frequencies
|
| 147 |
+
|
| 148 |
+
if TYPE_CHECKING:
|
| 149 |
+
from collections.abc import (
|
| 150 |
+
Iterator,
|
| 151 |
+
Sequence,
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
from pandas import Index
|
| 155 |
+
from pandas.core.arrays import (
|
| 156 |
+
DatetimeArray,
|
| 157 |
+
PeriodArray,
|
| 158 |
+
TimedeltaArray,
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
# Scalar type held by these arrays: Period/Timestamp/Timedelta, or NaT.
DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType]
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _make_unpacked_invalid_op(op_name: str):
    """Build an always-invalid operator for *op_name*, wrapped so that
    zero-dim arguments are unpacked and foreign operands defer."""
    invalid = make_invalid_op(op_name)
    return unpack_zerodim_and_defer(op_name)(invalid)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def _period_dispatch(meth: F) -> F:
    """
    For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results
    in PeriodArray. We cannot use ._ndarray directly for the affected
    methods because the i8 data has different semantics on NaT values.
    """

    @wraps(meth)
    def new_meth(self, *args, **kwargs):
        # Non-Period arrays: run the method unchanged.
        if not isinstance(self.dtype, PeriodDtype):
            return meth(self, *args, **kwargs)

        # Reinterpret ordinals as datetime64[ns] so NaT-aware code paths work.
        arr = self.view("M8[ns]")
        result = meth(arr, *args, **kwargs)
        if result is NaT:
            return NaT
        elif isinstance(result, Timestamp):
            # Scalar result: re-box its i8 value as a Period via self.
            return self._box_func(result._value)

        # Array result: view back to i8 and wrap as a PeriodArray.
        res_i8 = result.view("i8")
        return self._from_backing_data(res_i8)

    return cast(F, new_meth)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
|
| 195 |
+
# incompatible with definition in base class "ExtensionArray"
|
| 196 |
+
class DatetimeLikeArrayMixin( # type: ignore[misc]
|
| 197 |
+
OpsMixin, NDArrayBackedExtensionArray
|
| 198 |
+
):
|
| 199 |
+
"""
|
| 200 |
+
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
|
| 201 |
+
|
| 202 |
+
Assumes that __new__/__init__ defines:
|
| 203 |
+
_ndarray
|
| 204 |
+
|
| 205 |
+
and that inheriting subclass implements:
|
| 206 |
+
freq
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
# _infer_matches -> which infer_dtype strings are close enough to our own
|
| 210 |
+
_infer_matches: tuple[str, ...]
|
| 211 |
+
_is_recognized_dtype: Callable[[DtypeObj], bool]
|
| 212 |
+
_recognized_scalars: tuple[type, ...]
|
| 213 |
+
_ndarray: np.ndarray
|
| 214 |
+
freq: BaseOffset | None
|
| 215 |
+
|
| 216 |
+
    @cache_readonly
    def _can_hold_na(self) -> bool:
        # All datetime-like arrays support NaT as a missing value.
        return True
|
| 219 |
+
|
| 220 |
+
    def __init__(
        self, data, dtype: Dtype | None = None, freq=None, copy: bool = False
    ) -> None:
        # Abstract base: concrete subclasses implement construction.
        raise AbstractMethodError(self)
|
| 224 |
+
|
| 225 |
+
    @property
    def _scalar_type(self) -> type[DatetimeLikeScalar]:
        """
        The scalar associated with this datelike

        * PeriodArray : Period
        * DatetimeArray : Timestamp
        * TimedeltaArray : Timedelta
        """
        raise AbstractMethodError(self)
|
| 235 |
+
|
| 236 |
+
    def _scalar_from_string(self, value: str) -> DTScalarOrNaT:
        """
        Construct a scalar type from a string.

        Parameters
        ----------
        value : str

        Returns
        -------
        Period, Timestamp, or Timedelta, or NaT
            Whatever the type of ``self._scalar_type`` is.

        Notes
        -----
        This should call ``self._check_compatible_with`` before
        unboxing the result.
        """
        raise AbstractMethodError(self)
|
| 255 |
+
|
| 256 |
+
    def _unbox_scalar(
        self, value: DTScalarOrNaT
    ) -> np.int64 | np.datetime64 | np.timedelta64:
        """
        Unbox the integer value of a scalar `value`.

        Parameters
        ----------
        value : Period, Timestamp, Timedelta, or NaT
            Depending on subclass.

        Returns
        -------
        int

        Examples
        --------
        >>> arr = pd.array(np.array(['1970-01-01'], 'datetime64[ns]'))
        >>> arr._unbox_scalar(arr[0])
        numpy.datetime64('1970-01-01T00:00:00.000000000')
        """
        raise AbstractMethodError(self)
|
| 278 |
+
|
| 279 |
+
    def _check_compatible_with(self, other: DTScalarOrNaT) -> None:
        """
        Verify that `self` and `other` are compatible.

        * DatetimeArray verifies that the timezones (if any) match
        * PeriodArray verifies that the freq matches
        * Timedelta has no verification

        In each case, NaT is considered compatible.

        Parameters
        ----------
        other

        Raises
        ------
        Exception
        """
        raise AbstractMethodError(self)
|
| 298 |
+
|
| 299 |
+
# ------------------------------------------------------------------
|
| 300 |
+
|
| 301 |
+
    def _box_func(self, x):
        """
        box function to get object from internal representation
        """
        # Abstract: subclasses map a raw i8/numpy value to their scalar type.
        raise AbstractMethodError(self)
|
| 306 |
+
|
| 307 |
+
def _box_values(self, values) -> np.ndarray:
|
| 308 |
+
"""
|
| 309 |
+
apply box func to passed values
|
| 310 |
+
"""
|
| 311 |
+
return lib.map_infer(values, self._box_func, convert=False)
|
| 312 |
+
|
| 313 |
+
def __iter__(self) -> Iterator:
|
| 314 |
+
if self.ndim > 1:
|
| 315 |
+
return (self[n] for n in range(len(self)))
|
| 316 |
+
else:
|
| 317 |
+
return (self._box_func(v) for v in self.asi8)
|
| 318 |
+
|
| 319 |
+
    @property
    def asi8(self) -> npt.NDArray[np.int64]:
        """
        Integer representation of the values.

        Returns
        -------
        ndarray
            An ndarray with int64 dtype.
        """
        # do not cache or you'll create a memory leak
        return self._ndarray.view("i8")
|
| 331 |
+
|
| 332 |
+
# ----------------------------------------------------------------
|
| 333 |
+
# Rendering Methods
|
| 334 |
+
|
| 335 |
+
    def _format_native_types(
        self, *, na_rep: str | float = "NaT", date_format=None
    ) -> npt.NDArray[np.object_]:
        """
        Helper method for astype when converting to strings.

        Returns
        -------
        ndarray[str]
        """
        raise AbstractMethodError(self)
|
| 346 |
+
|
| 347 |
+
    def _formatter(self, boxed: bool = False):
        # TODO: Remove Datetime & DatetimeTZ formatters.
        # Default repr formatter: wraps each rendered value in single quotes.
        return "'{}'".format
|
| 350 |
+
|
| 351 |
+
# ----------------------------------------------------------------
|
| 352 |
+
# Array-Like / EA-Interface Methods
|
| 353 |
+
|
| 354 |
+
def __array__(
|
| 355 |
+
self, dtype: NpDtype | None = None, copy: bool | None = None
|
| 356 |
+
) -> np.ndarray:
|
| 357 |
+
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
|
| 358 |
+
if is_object_dtype(dtype):
|
| 359 |
+
return np.array(list(self), dtype=object)
|
| 360 |
+
return self._ndarray
|
| 361 |
+
|
| 362 |
+
    @overload
    def __getitem__(self, item: ScalarIndexer) -> DTScalarOrNaT:
        ...

    @overload
    def __getitem__(
        self,
        item: SequenceIndexer | PositionalIndexerTuple,
    ) -> Self:
        ...

    def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT:
        """
        This getitem defers to the underlying array, which by-definition can
        only handle list-likes, slices, and integer scalars
        """
        # Use cast as we know we will get back a DatetimeLikeArray or DTScalar,
        # but skip evaluating the Union at runtime for performance
        # (see https://github.com/pandas-dev/pandas/pull/44624)
        result = cast("Union[Self, DTScalarOrNaT]", super().__getitem__(key))
        if lib.is_scalar(result):
            return result
        else:
            # At this point we know the result is an array.
            result = cast(Self, result)
        # Array result: attach whatever freq survives this kind of indexing.
        result._freq = self._get_getitem_freq(key)
        return result
|
| 389 |
+
|
| 390 |
+
    def _get_getitem_freq(self, key) -> BaseOffset | None:
        """
        Find the `freq` attribute to assign to the result of a __getitem__ lookup.
        """
        is_period = isinstance(self.dtype, PeriodDtype)
        if is_period:
            # Period freq lives on the dtype, so any indexing preserves it.
            freq = self.freq
        elif self.ndim != 1:
            freq = None
        else:
            key = check_array_indexer(self, key)  # maybe ndarray[bool] -> slice
            freq = None
            if isinstance(key, slice):
                if self.freq is not None and key.step is not None:
                    # A stepped slice scales the frequency by the step.
                    freq = key.step * self.freq
                else:
                    freq = self.freq
            elif key is Ellipsis:
                # GH#21282 indexing with Ellipsis is similar to a full slice,
                # should preserve `freq` attribute
                freq = self.freq
            elif com.is_bool_indexer(key):
                new_key = lib.maybe_booleans_to_slice(key.view(np.uint8))
                if isinstance(new_key, slice):
                    # Contiguous boolean mask reduces to a slice; recurse.
                    return self._get_getitem_freq(new_key)
        return freq
|
| 416 |
+
|
| 417 |
+
    # error: Argument 1 of "__setitem__" is incompatible with supertype
    # "ExtensionArray"; supertype defines the argument type as "Union[int,
    # ndarray]"
    def __setitem__(
        self,
        key: int | Sequence[int] | Sequence[bool] | slice,
        value: NaTType | Any | Sequence[Any],
    ) -> None:
        # I'm fudging the types a bit here. "Any" above really depends
        # on type(self). For PeriodArray, it's Period (or stuff coercible
        # to a period in from_sequence). For DatetimeArray, it's Timestamp...
        # I don't know if mypy can do that, possibly with Generics.
        # https://mypy.readthedocs.io/en/latest/generics.html

        no_op = check_setitem_lengths(key, value, self)

        # Calling super() before the no_op short-circuit means that we raise
        # on invalid 'value' even if this is a no-op, e.g. wrong-dtype empty array.
        super().__setitem__(key, value)

        if no_op:
            return

        # In-place mutation may invalidate a cached freq (subclass hook).
        self._maybe_clear_freq()
|
| 441 |
+
|
| 442 |
+
def _maybe_clear_freq(self) -> None:
|
| 443 |
+
# inplace operations like __setitem__ may invalidate the freq of
|
| 444 |
+
# DatetimeArray and TimedeltaArray
|
| 445 |
+
pass
|
| 446 |
+
|
| 447 |
+
    def astype(self, dtype, copy: bool = True):
        # Some notes on cases we don't have to handle here in the base class:
        # 1. PeriodArray.astype handles period -> period
        # 2. DatetimeArray.astype handles conversion between tz.
        # 3. DatetimeArray.astype handles datetime -> period
        dtype = pandas_dtype(dtype)

        if dtype == object:
            if self.dtype.kind == "M":
                self = cast("DatetimeArray", self)
                # *much* faster than self._box_values
                # for e.g. test_get_loc_tuple_monotonic_above_size_cutoff
                i8data = self.asi8
                converted = ints_to_pydatetime(
                    i8data,
                    tz=self.tz,
                    box="timestamp",
                    reso=self._creso,
                )
                return converted

            elif self.dtype.kind == "m":
                return ints_to_pytimedelta(self._ndarray, box=True)

            # Remaining case (e.g. period): box element-wise.
            return self._box_values(self.asi8.ravel()).reshape(self.shape)

        elif isinstance(dtype, ExtensionDtype):
            return super().astype(dtype, copy=copy)
        elif is_string_dtype(dtype):
            return self._format_native_types()
        elif dtype.kind in "iu":
            # we deliberately ignore int32 vs. int64 here.
            # See https://github.com/pandas-dev/pandas/issues/24381 for more.
            values = self.asi8
            if dtype != np.int64:
                raise TypeError(
                    f"Converting from {self.dtype} to {dtype} is not supported. "
                    "Do obj.astype('int64').astype(dtype) instead"
                )

            if copy:
                values = values.copy()
            return values
        elif (dtype.kind in "mM" and self.dtype != dtype) or dtype.kind == "f":
            # disallow conversion between datetime/timedelta,
            # and conversions for any datetimelike to float
            msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
            raise TypeError(msg)
        else:
            return np.asarray(self, dtype=dtype)
|
| 497 |
+
|
| 498 |
+
    @overload
    def view(self) -> Self:
        ...

    @overload
    def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray:
        ...

    @overload
    def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray:
        ...

    @overload
    def view(self, dtype: Dtype | None = ...) -> ArrayLike:
        ...

    # pylint: disable-next=useless-parent-delegation
    def view(self, dtype: Dtype | None = None) -> ArrayLike:
        # we need to explicitly call super() method as long as the `@overload`s
        # are present in this file.
        return super().view(dtype)
|
| 519 |
+
|
| 520 |
+
# ------------------------------------------------------------------
|
| 521 |
+
# Validation Methods
|
| 522 |
+
# TODO: try to de-duplicate these, ensure identical behavior
|
| 523 |
+
|
| 524 |
+
    def _validate_comparison_value(self, other):
        # Coerce `other` into something comparable with self, raising
        # InvalidComparison when no sensible comparison exists.
        if isinstance(other, str):
            try:
                # GH#18435 strings get a pass from tzawareness compat
                other = self._scalar_from_string(other)
            except (ValueError, IncompatibleFrequency):
                # failed to parse as Timestamp/Timedelta/Period
                raise InvalidComparison(other)

        if isinstance(other, self._recognized_scalars) or other is NaT:
            other = self._scalar_type(other)
            try:
                self._check_compatible_with(other)
            except (TypeError, IncompatibleFrequency) as err:
                # e.g. tzawareness mismatch
                raise InvalidComparison(other) from err

        elif not is_list_like(other):
            raise InvalidComparison(other)

        elif len(other) != len(self):
            raise ValueError("Lengths must match")

        else:
            try:
                other = self._validate_listlike(other, allow_object=True)
                self._check_compatible_with(other)
            except (TypeError, IncompatibleFrequency) as err:
                if is_object_dtype(getattr(other, "dtype", None)):
                    # We will have to operate element-wise
                    pass
                else:
                    raise InvalidComparison(other) from err

        return other
|
| 559 |
+
|
| 560 |
+
    def _validate_scalar(
        self,
        value,
        *,
        allow_listlike: bool = False,
        unbox: bool = True,
    ):
        """
        Validate that the input value can be cast to our scalar_type.

        Parameters
        ----------
        value : object
        allow_listlike: bool, default False
            When raising an exception, whether the message should say
            listlike inputs are allowed.
        unbox : bool, default True
            Whether to unbox the result before returning. Note: unbox=False
            skips the setitem compatibility check.

        Returns
        -------
        self._scalar_type or NaT
        """
        if isinstance(value, self._scalar_type):
            pass

        elif isinstance(value, str):
            # NB: Careful about tzawareness
            try:
                value = self._scalar_from_string(value)
            except ValueError as err:
                msg = self._validation_error_message(value, allow_listlike)
                raise TypeError(msg) from err

        elif is_valid_na_for_dtype(value, self.dtype):
            # GH#18295
            value = NaT

        elif isna(value):
            # if we are dt64tz and value is dt64("NaT"), dont cast to NaT,
            # or else we'll fail to raise in _unbox_scalar
            msg = self._validation_error_message(value, allow_listlike)
            raise TypeError(msg)

        elif isinstance(value, self._recognized_scalars):
            # error: Argument 1 to "Timestamp" has incompatible type "object"; expected
            # "integer[Any] | float | str | date | datetime | datetime64"
            value = self._scalar_type(value)  # type: ignore[arg-type]

        else:
            msg = self._validation_error_message(value, allow_listlike)
            raise TypeError(msg)

        if not unbox:
            # NB: In general NDArrayBackedExtensionArray will unbox here;
            # this option exists to prevent a performance hit in
            # TimedeltaIndex.get_loc
            return value
        return self._unbox_scalar(value)
|
| 620 |
+
|
| 621 |
+
def _validation_error_message(self, value, allow_listlike: bool = False) -> str:
|
| 622 |
+
"""
|
| 623 |
+
Construct an exception message on validation error.
|
| 624 |
+
|
| 625 |
+
Some methods allow only scalar inputs, while others allow either scalar
|
| 626 |
+
or listlike.
|
| 627 |
+
|
| 628 |
+
Parameters
|
| 629 |
+
----------
|
| 630 |
+
allow_listlike: bool, default False
|
| 631 |
+
|
| 632 |
+
Returns
|
| 633 |
+
-------
|
| 634 |
+
str
|
| 635 |
+
"""
|
| 636 |
+
if hasattr(value, "dtype") and getattr(value, "ndim", 0) > 0:
|
| 637 |
+
msg_got = f"{value.dtype} array"
|
| 638 |
+
else:
|
| 639 |
+
msg_got = f"'{type(value).__name__}'"
|
| 640 |
+
if allow_listlike:
|
| 641 |
+
msg = (
|
| 642 |
+
f"value should be a '{self._scalar_type.__name__}', 'NaT', "
|
| 643 |
+
f"or array of those. Got {msg_got} instead."
|
| 644 |
+
)
|
| 645 |
+
else:
|
| 646 |
+
msg = (
|
| 647 |
+
f"value should be a '{self._scalar_type.__name__}' or 'NaT'. "
|
| 648 |
+
f"Got {msg_got} instead."
|
| 649 |
+
)
|
| 650 |
+
return msg
|
| 651 |
+
|
| 652 |
+
    def _validate_listlike(self, value, allow_object: bool = False):
        # Coerce a listlike `value` into an array compatible with self.
        if isinstance(value, type(self)):
            if self.dtype.kind in "mM" and not allow_object:
                # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
                value = value.as_unit(self.unit, round_ok=False)  # type: ignore[attr-defined]
            return value

        if isinstance(value, list) and len(value) == 0:
            # We treat empty list as our own dtype.
            return type(self)._from_sequence([], dtype=self.dtype)

        if hasattr(value, "dtype") and value.dtype == object:
            # `array` below won't do inference if value is an Index or Series.
            # so do so here. in the Index case, inferred_type may be cached.
            if lib.infer_dtype(value) in self._infer_matches:
                try:
                    value = type(self)._from_sequence(value)
                except (ValueError, TypeError):
                    if allow_object:
                        return value
                    msg = self._validation_error_message(value, True)
                    raise TypeError(msg)

        # Do type inference if necessary up front (after unpacking
        # NumpyExtensionArray)
        # e.g. we passed PeriodIndex.values and got an ndarray of Periods
        value = extract_array(value, extract_numpy=True)
        value = pd_array(value)
        value = extract_array(value, extract_numpy=True)

        if is_all_strings(value):
            # We got a StringArray
            try:
                # TODO: Could use from_sequence_of_strings if implemented
                # Note: passing dtype is necessary for PeriodArray tests
                value = type(self)._from_sequence(value, dtype=self.dtype)
            except ValueError:
                pass

        if isinstance(value.dtype, CategoricalDtype):
            # e.g. we have a Categorical holding self.dtype
            if value.categories.dtype == self.dtype:
                # TODO: do we need equal dtype or just comparable?
                value = value._internal_get_values()
                value = extract_array(value, extract_numpy=True)

        if allow_object and is_object_dtype(value.dtype):
            pass

        elif not type(self)._is_recognized_dtype(value.dtype):
            msg = self._validation_error_message(value, True)
            raise TypeError(msg)

        if self.dtype.kind in "mM" and not allow_object:
            # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
            value = value.as_unit(self.unit, round_ok=False)  # type: ignore[attr-defined]
        return value
|
| 709 |
+
|
| 710 |
+
def _validate_setitem_value(self, value):
|
| 711 |
+
if is_list_like(value):
|
| 712 |
+
value = self._validate_listlike(value)
|
| 713 |
+
else:
|
| 714 |
+
return self._validate_scalar(value, allow_listlike=True)
|
| 715 |
+
|
| 716 |
+
return self._unbox(value)
|
| 717 |
+
|
| 718 |
+
@final
|
| 719 |
+
def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray:
|
| 720 |
+
"""
|
| 721 |
+
Unbox either a scalar with _unbox_scalar or an instance of our own type.
|
| 722 |
+
"""
|
| 723 |
+
if lib.is_scalar(other):
|
| 724 |
+
other = self._unbox_scalar(other)
|
| 725 |
+
else:
|
| 726 |
+
# same type as self
|
| 727 |
+
self._check_compatible_with(other)
|
| 728 |
+
other = other._ndarray
|
| 729 |
+
return other
|
| 730 |
+
|
| 731 |
+
# ------------------------------------------------------------------
|
| 732 |
+
# Additional array methods
|
| 733 |
+
# These are not part of the EA API, but we implement them because
|
| 734 |
+
# pandas assumes they're there.
|
| 735 |
+
|
| 736 |
+
@ravel_compat
|
| 737 |
+
def map(self, mapper, na_action=None):
|
| 738 |
+
from pandas import Index
|
| 739 |
+
|
| 740 |
+
result = map_array(self, mapper, na_action=na_action)
|
| 741 |
+
result = Index(result)
|
| 742 |
+
|
| 743 |
+
if isinstance(result, ABCMultiIndex):
|
| 744 |
+
return result.to_numpy()
|
| 745 |
+
else:
|
| 746 |
+
return result.array
|
| 747 |
+
|
| 748 |
+
    def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
        """
        Compute boolean array of whether each value is found in the
        passed set of values.

        Parameters
        ----------
        values : np.ndarray or ExtensionArray

        Returns
        -------
        ndarray[bool]
        """
        if values.dtype.kind in "fiuc":
            # TODO: de-duplicate with equals, validate_comparison_value
            return np.zeros(self.shape, dtype=bool)

        values = ensure_wrapped_if_datetimelike(values)

        if not isinstance(values, type(self)):
            inferable = [
                "timedelta",
                "timedelta64",
                "datetime",
                "datetime64",
                "date",
                "period",
            ]
            if values.dtype == object:
                values = lib.maybe_convert_objects(
                    values,  # type: ignore[arg-type]
                    convert_non_numeric=True,
                    dtype_if_all_nat=self.dtype,
                )
                if values.dtype != object:
                    # Conversion succeeded; retry with the converted values.
                    return self.isin(values)

                inferred = lib.infer_dtype(values, skipna=False)
                if inferred not in inferable:
                    if inferred == "string":
                        pass

                    elif "mixed" in inferred:
                        return isin(self.astype(object), values)
                    else:
                        return np.zeros(self.shape, dtype=bool)

            try:
                values = type(self)._from_sequence(values)
            except ValueError:
                return isin(self.astype(object), values)
            else:
                warnings.warn(
                    # GH#53111
                    f"The behavior of 'isin' with dtype={self.dtype} and "
                    "castable values (e.g. strings) is deprecated. In a "
                    "future version, these will not be considered matching "
                    "by isin. Explicitly cast to the appropriate dtype before "
                    "calling isin instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

        if self.dtype.kind in "mM":
            self = cast("DatetimeArray | TimedeltaArray", self)
            # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
            # has no attribute "as_unit"
            values = values.as_unit(self.unit)  # type: ignore[union-attr]

        try:
            # error: Argument 1 to "_check_compatible_with" of "DatetimeLikeArrayMixin"
            # has incompatible type "ExtensionArray | ndarray[Any, Any]"; expected
            # "Period | Timestamp | Timedelta | NaTType"
            self._check_compatible_with(values)  # type: ignore[arg-type]
        except (TypeError, ValueError):
            # Includes tzawareness mismatch and IncompatibleFrequencyError
            return np.zeros(self.shape, dtype=bool)

        # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
        # has no attribute "asi8"
        return isin(self.asi8, values.asi8)  # type: ignore[union-attr]
|
| 829 |
+
|
| 830 |
+
# ------------------------------------------------------------------
|
| 831 |
+
# Null Handling
|
| 832 |
+
|
| 833 |
+
    def isna(self) -> npt.NDArray[np.bool_]:
        # Boolean mask of missing (NaT) positions.
        return self._isnan
|
| 835 |
+
|
| 836 |
+
    @property  # NB: override with cache_readonly in immutable subclasses
    def _isnan(self) -> npt.NDArray[np.bool_]:
        """
        return if each value is nan
        """
        # NaT is stored as iNaT in the i8 representation.
        return self.asi8 == iNaT
|
| 842 |
+
|
| 843 |
+
    @property  # NB: override with cache_readonly in immutable subclasses
    def _hasna(self) -> bool:
        """
        return if I have any nans; enables various perf speedups
        """
        return bool(self._isnan.any())
|
| 849 |
+
|
| 850 |
+
    def _maybe_mask_results(
        self, result: np.ndarray, fill_value=iNaT, convert=None
    ) -> np.ndarray:
        """
        Parameters
        ----------
        result : np.ndarray
        fill_value : object, default iNaT
        convert : str, dtype or None

        Returns
        -------
        result : ndarray with values replace by the fill_value

        mask the result if needed, convert to the provided dtype if its not
        None

        This is an internal routine.
        """
        if self._hasna:
            if convert:
                # Cast first so the fill value fits the target dtype.
                result = result.astype(convert)
            if fill_value is None:
                fill_value = np.nan
            np.putmask(result, self._isnan, fill_value)
        return result
|
| 876 |
+
|
| 877 |
+
# ------------------------------------------------------------------
|
| 878 |
+
# Frequency Properties/Methods
|
| 879 |
+
|
| 880 |
+
    @property
    def freqstr(self) -> str | None:
        """
        Return the frequency object as a string if it's set, otherwise None.

        Examples
        --------
        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
        >>> idx.freqstr
        'D'

        The frequency can be inferred if there are more than 2 points:

        >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"],
        ...                        freq="infer")
        >>> idx.freqstr
        '2D'

        For PeriodIndex:

        >>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
        >>> idx.freqstr
        'M'
        """
        if self.freq is None:
            return None
        return self.freq.freqstr
|
| 909 |
+
|
| 910 |
+
    @property  # NB: override with cache_readonly in immutable subclasses
    def inferred_freq(self) -> str | None:
        """
        Tries to return a string representing a frequency generated by infer_freq.

        Returns None if it can't autodetect the frequency.

        Examples
        --------
        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
        >>> idx.inferred_freq
        '2D'

        For TimedeltaIndex:

        >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
        >>> tdelta_idx
        TimedeltaIndex(['0 days', '10 days', '20 days'],
                       dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.inferred_freq
        '10D'
        """
        # Frequency inference is only defined for 1D arrays.
        if self.ndim != 1:
            return None
        try:
            return frequencies.infer_freq(self)
        except ValueError:
            # Could not infer a regular frequency.
            return None
|
| 940 |
+
|
| 941 |
+
@property # NB: override with cache_readonly in immutable subclasses
|
| 942 |
+
def _resolution_obj(self) -> Resolution | None:
|
| 943 |
+
freqstr = self.freqstr
|
| 944 |
+
if freqstr is None:
|
| 945 |
+
return None
|
| 946 |
+
try:
|
| 947 |
+
return Resolution.get_reso_from_freqstr(freqstr)
|
| 948 |
+
except KeyError:
|
| 949 |
+
return None
|
| 950 |
+
|
| 951 |
+
@property # NB: override with cache_readonly in immutable subclasses
|
| 952 |
+
def resolution(self) -> str:
|
| 953 |
+
"""
|
| 954 |
+
Returns day, hour, minute, second, millisecond or microsecond
|
| 955 |
+
"""
|
| 956 |
+
# error: Item "None" of "Optional[Any]" has no attribute "attrname"
|
| 957 |
+
return self._resolution_obj.attrname # type: ignore[union-attr]
|
| 958 |
+
|
| 959 |
+
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
|
| 960 |
+
# see GH#23789
|
| 961 |
+
|
| 962 |
+
@property
|
| 963 |
+
def _is_monotonic_increasing(self) -> bool:
|
| 964 |
+
return algos.is_monotonic(self.asi8, timelike=True)[0]
|
| 965 |
+
|
| 966 |
+
@property
|
| 967 |
+
def _is_monotonic_decreasing(self) -> bool:
|
| 968 |
+
return algos.is_monotonic(self.asi8, timelike=True)[1]
|
| 969 |
+
|
| 970 |
+
@property
|
| 971 |
+
def _is_unique(self) -> bool:
|
| 972 |
+
return len(unique1d(self.asi8.ravel("K"))) == self.size
|
| 973 |
+
|
| 974 |
+
# ------------------------------------------------------------------
|
| 975 |
+
# Arithmetic Methods
|
| 976 |
+
|
| 977 |
+
def _cmp_method(self, other, op):
|
| 978 |
+
if self.ndim > 1 and getattr(other, "shape", None) == self.shape:
|
| 979 |
+
# TODO: handle 2D-like listlikes
|
| 980 |
+
return op(self.ravel(), other.ravel()).reshape(self.shape)
|
| 981 |
+
|
| 982 |
+
try:
|
| 983 |
+
other = self._validate_comparison_value(other)
|
| 984 |
+
except InvalidComparison:
|
| 985 |
+
return invalid_comparison(self, other, op)
|
| 986 |
+
|
| 987 |
+
dtype = getattr(other, "dtype", None)
|
| 988 |
+
if is_object_dtype(dtype):
|
| 989 |
+
# We have to use comp_method_OBJECT_ARRAY instead of numpy
|
| 990 |
+
# comparison otherwise it would raise when comparing to None
|
| 991 |
+
result = ops.comp_method_OBJECT_ARRAY(
|
| 992 |
+
op, np.asarray(self.astype(object)), other
|
| 993 |
+
)
|
| 994 |
+
return result
|
| 995 |
+
if other is NaT:
|
| 996 |
+
if op is operator.ne:
|
| 997 |
+
result = np.ones(self.shape, dtype=bool)
|
| 998 |
+
else:
|
| 999 |
+
result = np.zeros(self.shape, dtype=bool)
|
| 1000 |
+
return result
|
| 1001 |
+
|
| 1002 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1003 |
+
self = cast(TimelikeOps, self)
|
| 1004 |
+
if self._creso != other._creso:
|
| 1005 |
+
if not isinstance(other, type(self)):
|
| 1006 |
+
# i.e. Timedelta/Timestamp, cast to ndarray and let
|
| 1007 |
+
# compare_mismatched_resolutions handle broadcasting
|
| 1008 |
+
try:
|
| 1009 |
+
# GH#52080 see if we can losslessly cast to shared unit
|
| 1010 |
+
other = other.as_unit(self.unit, round_ok=False)
|
| 1011 |
+
except ValueError:
|
| 1012 |
+
other_arr = np.array(other.asm8)
|
| 1013 |
+
return compare_mismatched_resolutions(
|
| 1014 |
+
self._ndarray, other_arr, op
|
| 1015 |
+
)
|
| 1016 |
+
else:
|
| 1017 |
+
other_arr = other._ndarray
|
| 1018 |
+
return compare_mismatched_resolutions(self._ndarray, other_arr, op)
|
| 1019 |
+
|
| 1020 |
+
other_vals = self._unbox(other)
|
| 1021 |
+
# GH#37462 comparison on i8 values is almost 2x faster than M8/m8
|
| 1022 |
+
result = op(self._ndarray.view("i8"), other_vals.view("i8"))
|
| 1023 |
+
|
| 1024 |
+
o_mask = isna(other)
|
| 1025 |
+
mask = self._isnan | o_mask
|
| 1026 |
+
if mask.any():
|
| 1027 |
+
nat_result = op is operator.ne
|
| 1028 |
+
np.putmask(result, mask, nat_result)
|
| 1029 |
+
|
| 1030 |
+
return result
|
| 1031 |
+
|
| 1032 |
+
# pow is invalid for all three subclasses; TimedeltaArray will override
|
| 1033 |
+
# the multiplication and division ops
|
| 1034 |
+
__pow__ = _make_unpacked_invalid_op("__pow__")
|
| 1035 |
+
__rpow__ = _make_unpacked_invalid_op("__rpow__")
|
| 1036 |
+
__mul__ = _make_unpacked_invalid_op("__mul__")
|
| 1037 |
+
__rmul__ = _make_unpacked_invalid_op("__rmul__")
|
| 1038 |
+
__truediv__ = _make_unpacked_invalid_op("__truediv__")
|
| 1039 |
+
__rtruediv__ = _make_unpacked_invalid_op("__rtruediv__")
|
| 1040 |
+
__floordiv__ = _make_unpacked_invalid_op("__floordiv__")
|
| 1041 |
+
__rfloordiv__ = _make_unpacked_invalid_op("__rfloordiv__")
|
| 1042 |
+
__mod__ = _make_unpacked_invalid_op("__mod__")
|
| 1043 |
+
__rmod__ = _make_unpacked_invalid_op("__rmod__")
|
| 1044 |
+
__divmod__ = _make_unpacked_invalid_op("__divmod__")
|
| 1045 |
+
__rdivmod__ = _make_unpacked_invalid_op("__rdivmod__")
|
| 1046 |
+
|
| 1047 |
+
@final
|
| 1048 |
+
def _get_i8_values_and_mask(
|
| 1049 |
+
self, other
|
| 1050 |
+
) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]:
|
| 1051 |
+
"""
|
| 1052 |
+
Get the int64 values and b_mask to pass to add_overflowsafe.
|
| 1053 |
+
"""
|
| 1054 |
+
if isinstance(other, Period):
|
| 1055 |
+
i8values = other.ordinal
|
| 1056 |
+
mask = None
|
| 1057 |
+
elif isinstance(other, (Timestamp, Timedelta)):
|
| 1058 |
+
i8values = other._value
|
| 1059 |
+
mask = None
|
| 1060 |
+
else:
|
| 1061 |
+
# PeriodArray, DatetimeArray, TimedeltaArray
|
| 1062 |
+
mask = other._isnan
|
| 1063 |
+
i8values = other.asi8
|
| 1064 |
+
return i8values, mask
|
| 1065 |
+
|
| 1066 |
+
@final
|
| 1067 |
+
def _get_arithmetic_result_freq(self, other) -> BaseOffset | None:
|
| 1068 |
+
"""
|
| 1069 |
+
Check if we can preserve self.freq in addition or subtraction.
|
| 1070 |
+
"""
|
| 1071 |
+
# Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving
|
| 1072 |
+
# whenever self.freq is a Tick
|
| 1073 |
+
if isinstance(self.dtype, PeriodDtype):
|
| 1074 |
+
return self.freq
|
| 1075 |
+
elif not lib.is_scalar(other):
|
| 1076 |
+
return None
|
| 1077 |
+
elif isinstance(self.freq, Tick):
|
| 1078 |
+
# In these cases
|
| 1079 |
+
return self.freq
|
| 1080 |
+
return None
|
| 1081 |
+
|
| 1082 |
+
@final
|
| 1083 |
+
def _add_datetimelike_scalar(self, other) -> DatetimeArray:
|
| 1084 |
+
if not lib.is_np_dtype(self.dtype, "m"):
|
| 1085 |
+
raise TypeError(
|
| 1086 |
+
f"cannot add {type(self).__name__} and {type(other).__name__}"
|
| 1087 |
+
)
|
| 1088 |
+
|
| 1089 |
+
self = cast("TimedeltaArray", self)
|
| 1090 |
+
|
| 1091 |
+
from pandas.core.arrays import DatetimeArray
|
| 1092 |
+
from pandas.core.arrays.datetimes import tz_to_dtype
|
| 1093 |
+
|
| 1094 |
+
assert other is not NaT
|
| 1095 |
+
if isna(other):
|
| 1096 |
+
# i.e. np.datetime64("NaT")
|
| 1097 |
+
# In this case we specifically interpret NaT as a datetime, not
|
| 1098 |
+
# the timedelta interpretation we would get by returning self + NaT
|
| 1099 |
+
result = self._ndarray + NaT.to_datetime64().astype(f"M8[{self.unit}]")
|
| 1100 |
+
# Preserve our resolution
|
| 1101 |
+
return DatetimeArray._simple_new(result, dtype=result.dtype)
|
| 1102 |
+
|
| 1103 |
+
other = Timestamp(other)
|
| 1104 |
+
self, other = self._ensure_matching_resos(other)
|
| 1105 |
+
self = cast("TimedeltaArray", self)
|
| 1106 |
+
|
| 1107 |
+
other_i8, o_mask = self._get_i8_values_and_mask(other)
|
| 1108 |
+
result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
|
| 1109 |
+
res_values = result.view(f"M8[{self.unit}]")
|
| 1110 |
+
|
| 1111 |
+
dtype = tz_to_dtype(tz=other.tz, unit=self.unit)
|
| 1112 |
+
res_values = result.view(f"M8[{self.unit}]")
|
| 1113 |
+
new_freq = self._get_arithmetic_result_freq(other)
|
| 1114 |
+
return DatetimeArray._simple_new(res_values, dtype=dtype, freq=new_freq)
|
| 1115 |
+
|
| 1116 |
+
@final
|
| 1117 |
+
def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray:
|
| 1118 |
+
if not lib.is_np_dtype(self.dtype, "m"):
|
| 1119 |
+
raise TypeError(
|
| 1120 |
+
f"cannot add {type(self).__name__} and {type(other).__name__}"
|
| 1121 |
+
)
|
| 1122 |
+
|
| 1123 |
+
# defer to DatetimeArray.__add__
|
| 1124 |
+
return other + self
|
| 1125 |
+
|
| 1126 |
+
@final
|
| 1127 |
+
def _sub_datetimelike_scalar(
|
| 1128 |
+
self, other: datetime | np.datetime64
|
| 1129 |
+
) -> TimedeltaArray:
|
| 1130 |
+
if self.dtype.kind != "M":
|
| 1131 |
+
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
|
| 1132 |
+
|
| 1133 |
+
self = cast("DatetimeArray", self)
|
| 1134 |
+
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
|
| 1135 |
+
|
| 1136 |
+
if isna(other):
|
| 1137 |
+
# i.e. np.datetime64("NaT")
|
| 1138 |
+
return self - NaT
|
| 1139 |
+
|
| 1140 |
+
ts = Timestamp(other)
|
| 1141 |
+
|
| 1142 |
+
self, ts = self._ensure_matching_resos(ts)
|
| 1143 |
+
return self._sub_datetimelike(ts)
|
| 1144 |
+
|
| 1145 |
+
@final
|
| 1146 |
+
def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray:
|
| 1147 |
+
if self.dtype.kind != "M":
|
| 1148 |
+
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
|
| 1149 |
+
|
| 1150 |
+
if len(self) != len(other):
|
| 1151 |
+
raise ValueError("cannot add indices of unequal length")
|
| 1152 |
+
|
| 1153 |
+
self = cast("DatetimeArray", self)
|
| 1154 |
+
|
| 1155 |
+
self, other = self._ensure_matching_resos(other)
|
| 1156 |
+
return self._sub_datetimelike(other)
|
| 1157 |
+
|
| 1158 |
+
@final
|
| 1159 |
+
def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray:
|
| 1160 |
+
self = cast("DatetimeArray", self)
|
| 1161 |
+
|
| 1162 |
+
from pandas.core.arrays import TimedeltaArray
|
| 1163 |
+
|
| 1164 |
+
try:
|
| 1165 |
+
self._assert_tzawareness_compat(other)
|
| 1166 |
+
except TypeError as err:
|
| 1167 |
+
new_message = str(err).replace("compare", "subtract")
|
| 1168 |
+
raise type(err)(new_message) from err
|
| 1169 |
+
|
| 1170 |
+
other_i8, o_mask = self._get_i8_values_and_mask(other)
|
| 1171 |
+
res_values = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
|
| 1172 |
+
res_m8 = res_values.view(f"timedelta64[{self.unit}]")
|
| 1173 |
+
|
| 1174 |
+
new_freq = self._get_arithmetic_result_freq(other)
|
| 1175 |
+
new_freq = cast("Tick | None", new_freq)
|
| 1176 |
+
return TimedeltaArray._simple_new(res_m8, dtype=res_m8.dtype, freq=new_freq)
|
| 1177 |
+
|
| 1178 |
+
@final
|
| 1179 |
+
def _add_period(self, other: Period) -> PeriodArray:
|
| 1180 |
+
if not lib.is_np_dtype(self.dtype, "m"):
|
| 1181 |
+
raise TypeError(f"cannot add Period to a {type(self).__name__}")
|
| 1182 |
+
|
| 1183 |
+
# We will wrap in a PeriodArray and defer to the reversed operation
|
| 1184 |
+
from pandas.core.arrays.period import PeriodArray
|
| 1185 |
+
|
| 1186 |
+
i8vals = np.broadcast_to(other.ordinal, self.shape)
|
| 1187 |
+
dtype = PeriodDtype(other.freq)
|
| 1188 |
+
parr = PeriodArray(i8vals, dtype=dtype)
|
| 1189 |
+
return parr + self
|
| 1190 |
+
|
| 1191 |
+
def _add_offset(self, offset):
|
| 1192 |
+
raise AbstractMethodError(self)
|
| 1193 |
+
|
| 1194 |
+
def _add_timedeltalike_scalar(self, other):
|
| 1195 |
+
"""
|
| 1196 |
+
Add a delta of a timedeltalike
|
| 1197 |
+
|
| 1198 |
+
Returns
|
| 1199 |
+
-------
|
| 1200 |
+
Same type as self
|
| 1201 |
+
"""
|
| 1202 |
+
if isna(other):
|
| 1203 |
+
# i.e np.timedelta64("NaT")
|
| 1204 |
+
new_values = np.empty(self.shape, dtype="i8").view(self._ndarray.dtype)
|
| 1205 |
+
new_values.fill(iNaT)
|
| 1206 |
+
return type(self)._simple_new(new_values, dtype=self.dtype)
|
| 1207 |
+
|
| 1208 |
+
# PeriodArray overrides, so we only get here with DTA/TDA
|
| 1209 |
+
self = cast("DatetimeArray | TimedeltaArray", self)
|
| 1210 |
+
other = Timedelta(other)
|
| 1211 |
+
self, other = self._ensure_matching_resos(other)
|
| 1212 |
+
return self._add_timedeltalike(other)
|
| 1213 |
+
|
| 1214 |
+
def _add_timedelta_arraylike(self, other: TimedeltaArray):
|
| 1215 |
+
"""
|
| 1216 |
+
Add a delta of a TimedeltaIndex
|
| 1217 |
+
|
| 1218 |
+
Returns
|
| 1219 |
+
-------
|
| 1220 |
+
Same type as self
|
| 1221 |
+
"""
|
| 1222 |
+
# overridden by PeriodArray
|
| 1223 |
+
|
| 1224 |
+
if len(self) != len(other):
|
| 1225 |
+
raise ValueError("cannot add indices of unequal length")
|
| 1226 |
+
|
| 1227 |
+
self = cast("DatetimeArray | TimedeltaArray", self)
|
| 1228 |
+
|
| 1229 |
+
self, other = self._ensure_matching_resos(other)
|
| 1230 |
+
return self._add_timedeltalike(other)
|
| 1231 |
+
|
| 1232 |
+
@final
|
| 1233 |
+
def _add_timedeltalike(self, other: Timedelta | TimedeltaArray):
|
| 1234 |
+
self = cast("DatetimeArray | TimedeltaArray", self)
|
| 1235 |
+
|
| 1236 |
+
other_i8, o_mask = self._get_i8_values_and_mask(other)
|
| 1237 |
+
new_values = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
|
| 1238 |
+
res_values = new_values.view(self._ndarray.dtype)
|
| 1239 |
+
|
| 1240 |
+
new_freq = self._get_arithmetic_result_freq(other)
|
| 1241 |
+
|
| 1242 |
+
# error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
|
| 1243 |
+
# incompatible type "Union[dtype[datetime64], DatetimeTZDtype,
|
| 1244 |
+
# dtype[timedelta64]]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
|
| 1245 |
+
return type(self)._simple_new(
|
| 1246 |
+
res_values, dtype=self.dtype, freq=new_freq # type: ignore[arg-type]
|
| 1247 |
+
)
|
| 1248 |
+
|
| 1249 |
+
@final
|
| 1250 |
+
def _add_nat(self):
|
| 1251 |
+
"""
|
| 1252 |
+
Add pd.NaT to self
|
| 1253 |
+
"""
|
| 1254 |
+
if isinstance(self.dtype, PeriodDtype):
|
| 1255 |
+
raise TypeError(
|
| 1256 |
+
f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
|
| 1257 |
+
)
|
| 1258 |
+
self = cast("TimedeltaArray | DatetimeArray", self)
|
| 1259 |
+
|
| 1260 |
+
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
|
| 1261 |
+
# and datetime dtypes
|
| 1262 |
+
result = np.empty(self.shape, dtype=np.int64)
|
| 1263 |
+
result.fill(iNaT)
|
| 1264 |
+
result = result.view(self._ndarray.dtype) # preserve reso
|
| 1265 |
+
# error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
|
| 1266 |
+
# incompatible type "Union[dtype[timedelta64], dtype[datetime64],
|
| 1267 |
+
# DatetimeTZDtype]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
|
| 1268 |
+
return type(self)._simple_new(
|
| 1269 |
+
result, dtype=self.dtype, freq=None # type: ignore[arg-type]
|
| 1270 |
+
)
|
| 1271 |
+
|
| 1272 |
+
@final
|
| 1273 |
+
def _sub_nat(self):
|
| 1274 |
+
"""
|
| 1275 |
+
Subtract pd.NaT from self
|
| 1276 |
+
"""
|
| 1277 |
+
# GH#19124 Timedelta - datetime is not in general well-defined.
|
| 1278 |
+
# We make an exception for pd.NaT, which in this case quacks
|
| 1279 |
+
# like a timedelta.
|
| 1280 |
+
# For datetime64 dtypes by convention we treat NaT as a datetime, so
|
| 1281 |
+
# this subtraction returns a timedelta64 dtype.
|
| 1282 |
+
# For period dtype, timedelta64 is a close-enough return dtype.
|
| 1283 |
+
result = np.empty(self.shape, dtype=np.int64)
|
| 1284 |
+
result.fill(iNaT)
|
| 1285 |
+
if self.dtype.kind in "mM":
|
| 1286 |
+
# We can retain unit in dtype
|
| 1287 |
+
self = cast("DatetimeArray| TimedeltaArray", self)
|
| 1288 |
+
return result.view(f"timedelta64[{self.unit}]")
|
| 1289 |
+
else:
|
| 1290 |
+
return result.view("timedelta64[ns]")
|
| 1291 |
+
|
| 1292 |
+
@final
|
| 1293 |
+
def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]:
|
| 1294 |
+
# If the operation is well-defined, we return an object-dtype ndarray
|
| 1295 |
+
# of DateOffsets. Null entries are filled with pd.NaT
|
| 1296 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1297 |
+
raise TypeError(
|
| 1298 |
+
f"cannot subtract {type(other).__name__} from {type(self).__name__}"
|
| 1299 |
+
)
|
| 1300 |
+
|
| 1301 |
+
self = cast("PeriodArray", self)
|
| 1302 |
+
self._check_compatible_with(other)
|
| 1303 |
+
|
| 1304 |
+
other_i8, o_mask = self._get_i8_values_and_mask(other)
|
| 1305 |
+
new_i8_data = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
|
| 1306 |
+
new_data = np.array([self.freq.base * x for x in new_i8_data])
|
| 1307 |
+
|
| 1308 |
+
if o_mask is None:
|
| 1309 |
+
# i.e. Period scalar
|
| 1310 |
+
mask = self._isnan
|
| 1311 |
+
else:
|
| 1312 |
+
# i.e. PeriodArray
|
| 1313 |
+
mask = self._isnan | o_mask
|
| 1314 |
+
new_data[mask] = NaT
|
| 1315 |
+
return new_data
|
| 1316 |
+
|
| 1317 |
+
@final
|
| 1318 |
+
def _addsub_object_array(self, other: npt.NDArray[np.object_], op):
|
| 1319 |
+
"""
|
| 1320 |
+
Add or subtract array-like of DateOffset objects
|
| 1321 |
+
|
| 1322 |
+
Parameters
|
| 1323 |
+
----------
|
| 1324 |
+
other : np.ndarray[object]
|
| 1325 |
+
op : {operator.add, operator.sub}
|
| 1326 |
+
|
| 1327 |
+
Returns
|
| 1328 |
+
-------
|
| 1329 |
+
np.ndarray[object]
|
| 1330 |
+
Except in fastpath case with length 1 where we operate on the
|
| 1331 |
+
contained scalar.
|
| 1332 |
+
"""
|
| 1333 |
+
assert op in [operator.add, operator.sub]
|
| 1334 |
+
if len(other) == 1 and self.ndim == 1:
|
| 1335 |
+
# Note: without this special case, we could annotate return type
|
| 1336 |
+
# as ndarray[object]
|
| 1337 |
+
# If both 1D then broadcasting is unambiguous
|
| 1338 |
+
return op(self, other[0])
|
| 1339 |
+
|
| 1340 |
+
warnings.warn(
|
| 1341 |
+
"Adding/subtracting object-dtype array to "
|
| 1342 |
+
f"{type(self).__name__} not vectorized.",
|
| 1343 |
+
PerformanceWarning,
|
| 1344 |
+
stacklevel=find_stack_level(),
|
| 1345 |
+
)
|
| 1346 |
+
|
| 1347 |
+
# Caller is responsible for broadcasting if necessary
|
| 1348 |
+
assert self.shape == other.shape, (self.shape, other.shape)
|
| 1349 |
+
|
| 1350 |
+
res_values = op(self.astype("O"), np.asarray(other))
|
| 1351 |
+
return res_values
|
| 1352 |
+
|
| 1353 |
+
def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> Self:
|
| 1354 |
+
if name not in {"cummin", "cummax"}:
|
| 1355 |
+
raise TypeError(f"Accumulation {name} not supported for {type(self)}")
|
| 1356 |
+
|
| 1357 |
+
op = getattr(datetimelike_accumulations, name)
|
| 1358 |
+
result = op(self.copy(), skipna=skipna, **kwargs)
|
| 1359 |
+
|
| 1360 |
+
return type(self)._simple_new(result, dtype=self.dtype)
|
| 1361 |
+
|
| 1362 |
+
@unpack_zerodim_and_defer("__add__")
|
| 1363 |
+
def __add__(self, other):
|
| 1364 |
+
other_dtype = getattr(other, "dtype", None)
|
| 1365 |
+
other = ensure_wrapped_if_datetimelike(other)
|
| 1366 |
+
|
| 1367 |
+
# scalar others
|
| 1368 |
+
if other is NaT:
|
| 1369 |
+
result = self._add_nat()
|
| 1370 |
+
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
|
| 1371 |
+
result = self._add_timedeltalike_scalar(other)
|
| 1372 |
+
elif isinstance(other, BaseOffset):
|
| 1373 |
+
# specifically _not_ a Tick
|
| 1374 |
+
result = self._add_offset(other)
|
| 1375 |
+
elif isinstance(other, (datetime, np.datetime64)):
|
| 1376 |
+
result = self._add_datetimelike_scalar(other)
|
| 1377 |
+
elif isinstance(other, Period) and lib.is_np_dtype(self.dtype, "m"):
|
| 1378 |
+
result = self._add_period(other)
|
| 1379 |
+
elif lib.is_integer(other):
|
| 1380 |
+
# This check must come after the check for np.timedelta64
|
| 1381 |
+
# as is_integer returns True for these
|
| 1382 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1383 |
+
raise integer_op_not_supported(self)
|
| 1384 |
+
obj = cast("PeriodArray", self)
|
| 1385 |
+
result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)
|
| 1386 |
+
|
| 1387 |
+
# array-like others
|
| 1388 |
+
elif lib.is_np_dtype(other_dtype, "m"):
|
| 1389 |
+
# TimedeltaIndex, ndarray[timedelta64]
|
| 1390 |
+
result = self._add_timedelta_arraylike(other)
|
| 1391 |
+
elif is_object_dtype(other_dtype):
|
| 1392 |
+
# e.g. Array/Index of DateOffset objects
|
| 1393 |
+
result = self._addsub_object_array(other, operator.add)
|
| 1394 |
+
elif lib.is_np_dtype(other_dtype, "M") or isinstance(
|
| 1395 |
+
other_dtype, DatetimeTZDtype
|
| 1396 |
+
):
|
| 1397 |
+
# DatetimeIndex, ndarray[datetime64]
|
| 1398 |
+
return self._add_datetime_arraylike(other)
|
| 1399 |
+
elif is_integer_dtype(other_dtype):
|
| 1400 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1401 |
+
raise integer_op_not_supported(self)
|
| 1402 |
+
obj = cast("PeriodArray", self)
|
| 1403 |
+
result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)
|
| 1404 |
+
else:
|
| 1405 |
+
# Includes Categorical, other ExtensionArrays
|
| 1406 |
+
# For PeriodDtype, if self is a TimedeltaArray and other is a
|
| 1407 |
+
# PeriodArray with a timedelta-like (i.e. Tick) freq, this
|
| 1408 |
+
# operation is valid. Defer to the PeriodArray implementation.
|
| 1409 |
+
# In remaining cases, this will end up raising TypeError.
|
| 1410 |
+
return NotImplemented
|
| 1411 |
+
|
| 1412 |
+
if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
|
| 1413 |
+
from pandas.core.arrays import TimedeltaArray
|
| 1414 |
+
|
| 1415 |
+
return TimedeltaArray._from_sequence(result)
|
| 1416 |
+
return result
|
| 1417 |
+
|
| 1418 |
+
def __radd__(self, other):
|
| 1419 |
+
# alias for __add__
|
| 1420 |
+
return self.__add__(other)
|
| 1421 |
+
|
| 1422 |
+
@unpack_zerodim_and_defer("__sub__")
|
| 1423 |
+
def __sub__(self, other):
|
| 1424 |
+
other_dtype = getattr(other, "dtype", None)
|
| 1425 |
+
other = ensure_wrapped_if_datetimelike(other)
|
| 1426 |
+
|
| 1427 |
+
# scalar others
|
| 1428 |
+
if other is NaT:
|
| 1429 |
+
result = self._sub_nat()
|
| 1430 |
+
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
|
| 1431 |
+
result = self._add_timedeltalike_scalar(-other)
|
| 1432 |
+
elif isinstance(other, BaseOffset):
|
| 1433 |
+
# specifically _not_ a Tick
|
| 1434 |
+
result = self._add_offset(-other)
|
| 1435 |
+
elif isinstance(other, (datetime, np.datetime64)):
|
| 1436 |
+
result = self._sub_datetimelike_scalar(other)
|
| 1437 |
+
elif lib.is_integer(other):
|
| 1438 |
+
# This check must come after the check for np.timedelta64
|
| 1439 |
+
# as is_integer returns True for these
|
| 1440 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1441 |
+
raise integer_op_not_supported(self)
|
| 1442 |
+
obj = cast("PeriodArray", self)
|
| 1443 |
+
result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)
|
| 1444 |
+
|
| 1445 |
+
elif isinstance(other, Period):
|
| 1446 |
+
result = self._sub_periodlike(other)
|
| 1447 |
+
|
| 1448 |
+
# array-like others
|
| 1449 |
+
elif lib.is_np_dtype(other_dtype, "m"):
|
| 1450 |
+
# TimedeltaIndex, ndarray[timedelta64]
|
| 1451 |
+
result = self._add_timedelta_arraylike(-other)
|
| 1452 |
+
elif is_object_dtype(other_dtype):
|
| 1453 |
+
# e.g. Array/Index of DateOffset objects
|
| 1454 |
+
result = self._addsub_object_array(other, operator.sub)
|
| 1455 |
+
elif lib.is_np_dtype(other_dtype, "M") or isinstance(
|
| 1456 |
+
other_dtype, DatetimeTZDtype
|
| 1457 |
+
):
|
| 1458 |
+
# DatetimeIndex, ndarray[datetime64]
|
| 1459 |
+
result = self._sub_datetime_arraylike(other)
|
| 1460 |
+
elif isinstance(other_dtype, PeriodDtype):
|
| 1461 |
+
# PeriodIndex
|
| 1462 |
+
result = self._sub_periodlike(other)
|
| 1463 |
+
elif is_integer_dtype(other_dtype):
|
| 1464 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1465 |
+
raise integer_op_not_supported(self)
|
| 1466 |
+
obj = cast("PeriodArray", self)
|
| 1467 |
+
result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)
|
| 1468 |
+
else:
|
| 1469 |
+
# Includes ExtensionArrays, float_dtype
|
| 1470 |
+
return NotImplemented
|
| 1471 |
+
|
| 1472 |
+
if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
|
| 1473 |
+
from pandas.core.arrays import TimedeltaArray
|
| 1474 |
+
|
| 1475 |
+
return TimedeltaArray._from_sequence(result)
|
| 1476 |
+
return result
|
| 1477 |
+
|
| 1478 |
+
def __rsub__(self, other):
|
| 1479 |
+
other_dtype = getattr(other, "dtype", None)
|
| 1480 |
+
other_is_dt64 = lib.is_np_dtype(other_dtype, "M") or isinstance(
|
| 1481 |
+
other_dtype, DatetimeTZDtype
|
| 1482 |
+
)
|
| 1483 |
+
|
| 1484 |
+
if other_is_dt64 and lib.is_np_dtype(self.dtype, "m"):
|
| 1485 |
+
# ndarray[datetime64] cannot be subtracted from self, so
|
| 1486 |
+
# we need to wrap in DatetimeArray/Index and flip the operation
|
| 1487 |
+
if lib.is_scalar(other):
|
| 1488 |
+
# i.e. np.datetime64 object
|
| 1489 |
+
return Timestamp(other) - self
|
| 1490 |
+
if not isinstance(other, DatetimeLikeArrayMixin):
|
| 1491 |
+
# Avoid down-casting DatetimeIndex
|
| 1492 |
+
from pandas.core.arrays import DatetimeArray
|
| 1493 |
+
|
| 1494 |
+
other = DatetimeArray._from_sequence(other)
|
| 1495 |
+
return other - self
|
| 1496 |
+
elif self.dtype.kind == "M" and hasattr(other, "dtype") and not other_is_dt64:
|
| 1497 |
+
# GH#19959 datetime - datetime is well-defined as timedelta,
|
| 1498 |
+
# but any other type - datetime is not well-defined.
|
| 1499 |
+
raise TypeError(
|
| 1500 |
+
f"cannot subtract {type(self).__name__} from {type(other).__name__}"
|
| 1501 |
+
)
|
| 1502 |
+
elif isinstance(self.dtype, PeriodDtype) and lib.is_np_dtype(other_dtype, "m"):
|
| 1503 |
+
# TODO: Can we simplify/generalize these cases at all?
|
| 1504 |
+
raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
|
| 1505 |
+
elif lib.is_np_dtype(self.dtype, "m"):
|
| 1506 |
+
self = cast("TimedeltaArray", self)
|
| 1507 |
+
return (-self) + other
|
| 1508 |
+
|
| 1509 |
+
# We get here with e.g. datetime objects
|
| 1510 |
+
return -(self - other)
|
| 1511 |
+
|
| 1512 |
+
def __iadd__(self, other) -> Self:
|
| 1513 |
+
result = self + other
|
| 1514 |
+
self[:] = result[:]
|
| 1515 |
+
|
| 1516 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1517 |
+
# restore freq, which is invalidated by setitem
|
| 1518 |
+
self._freq = result.freq
|
| 1519 |
+
return self
|
| 1520 |
+
|
| 1521 |
+
def __isub__(self, other) -> Self:
|
| 1522 |
+
result = self - other
|
| 1523 |
+
self[:] = result[:]
|
| 1524 |
+
|
| 1525 |
+
if not isinstance(self.dtype, PeriodDtype):
|
| 1526 |
+
# restore freq, which is invalidated by setitem
|
| 1527 |
+
self._freq = result.freq
|
| 1528 |
+
return self
|
| 1529 |
+
|
| 1530 |
+
# --------------------------------------------------------------
|
| 1531 |
+
# Reductions
|
| 1532 |
+
|
| 1533 |
+
@_period_dispatch
|
| 1534 |
+
def _quantile(
|
| 1535 |
+
self,
|
| 1536 |
+
qs: npt.NDArray[np.float64],
|
| 1537 |
+
interpolation: str,
|
| 1538 |
+
) -> Self:
|
| 1539 |
+
return super()._quantile(qs=qs, interpolation=interpolation)
|
| 1540 |
+
|
| 1541 |
+
@_period_dispatch
|
| 1542 |
+
def min(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
|
| 1543 |
+
"""
|
| 1544 |
+
Return the minimum value of the Array or minimum along
|
| 1545 |
+
an axis.
|
| 1546 |
+
|
| 1547 |
+
See Also
|
| 1548 |
+
--------
|
| 1549 |
+
numpy.ndarray.min
|
| 1550 |
+
Index.min : Return the minimum value in an Index.
|
| 1551 |
+
Series.min : Return the minimum value in a Series.
|
| 1552 |
+
"""
|
| 1553 |
+
nv.validate_min((), kwargs)
|
| 1554 |
+
nv.validate_minmax_axis(axis, self.ndim)
|
| 1555 |
+
|
| 1556 |
+
result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
|
| 1557 |
+
return self._wrap_reduction_result(axis, result)
|
| 1558 |
+
|
| 1559 |
+
@_period_dispatch
|
| 1560 |
+
def max(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
|
| 1561 |
+
"""
|
| 1562 |
+
Return the maximum value of the Array or maximum along
|
| 1563 |
+
an axis.
|
| 1564 |
+
|
| 1565 |
+
See Also
|
| 1566 |
+
--------
|
| 1567 |
+
numpy.ndarray.max
|
| 1568 |
+
Index.max : Return the maximum value in an Index.
|
| 1569 |
+
Series.max : Return the maximum value in a Series.
|
| 1570 |
+
"""
|
| 1571 |
+
nv.validate_max((), kwargs)
|
| 1572 |
+
nv.validate_minmax_axis(axis, self.ndim)
|
| 1573 |
+
|
| 1574 |
+
result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
|
| 1575 |
+
return self._wrap_reduction_result(axis, result)
|
| 1576 |
+
|
| 1577 |
+
def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0):
|
| 1578 |
+
"""
|
| 1579 |
+
Return the mean value of the Array.
|
| 1580 |
+
|
| 1581 |
+
Parameters
|
| 1582 |
+
----------
|
| 1583 |
+
skipna : bool, default True
|
| 1584 |
+
Whether to ignore any NaT elements.
|
| 1585 |
+
axis : int, optional, default 0
|
| 1586 |
+
|
| 1587 |
+
Returns
|
| 1588 |
+
-------
|
| 1589 |
+
scalar
|
| 1590 |
+
Timestamp or Timedelta.
|
| 1591 |
+
|
| 1592 |
+
See Also
|
| 1593 |
+
--------
|
| 1594 |
+
numpy.ndarray.mean : Returns the average of array elements along a given axis.
|
| 1595 |
+
Series.mean : Return the mean value in a Series.
|
| 1596 |
+
|
| 1597 |
+
Notes
|
| 1598 |
+
-----
|
| 1599 |
+
mean is only defined for Datetime and Timedelta dtypes, not for Period.
|
| 1600 |
+
|
| 1601 |
+
Examples
|
| 1602 |
+
--------
|
| 1603 |
+
For :class:`pandas.DatetimeIndex`:
|
| 1604 |
+
|
| 1605 |
+
>>> idx = pd.date_range('2001-01-01 00:00', periods=3)
|
| 1606 |
+
>>> idx
|
| 1607 |
+
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
|
| 1608 |
+
dtype='datetime64[ns]', freq='D')
|
| 1609 |
+
>>> idx.mean()
|
| 1610 |
+
Timestamp('2001-01-02 00:00:00')
|
| 1611 |
+
|
| 1612 |
+
For :class:`pandas.TimedeltaIndex`:
|
| 1613 |
+
|
| 1614 |
+
>>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
|
| 1615 |
+
>>> tdelta_idx
|
| 1616 |
+
TimedeltaIndex(['1 days', '2 days', '3 days'],
|
| 1617 |
+
dtype='timedelta64[ns]', freq=None)
|
| 1618 |
+
>>> tdelta_idx.mean()
|
| 1619 |
+
Timedelta('2 days 00:00:00')
|
| 1620 |
+
"""
|
| 1621 |
+
if isinstance(self.dtype, PeriodDtype):
|
| 1622 |
+
# See discussion in GH#24757
|
| 1623 |
+
raise TypeError(
|
| 1624 |
+
f"mean is not implemented for {type(self).__name__} since the "
|
| 1625 |
+
"meaning is ambiguous. An alternative is "
|
| 1626 |
+
"obj.to_timestamp(how='start').mean()"
|
| 1627 |
+
)
|
| 1628 |
+
|
| 1629 |
+
result = nanops.nanmean(
|
| 1630 |
+
self._ndarray, axis=axis, skipna=skipna, mask=self.isna()
|
| 1631 |
+
)
|
| 1632 |
+
return self._wrap_reduction_result(axis, result)
|
| 1633 |
+
|
| 1634 |
+
@_period_dispatch
|
| 1635 |
+
def median(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
|
| 1636 |
+
nv.validate_median((), kwargs)
|
| 1637 |
+
|
| 1638 |
+
if axis is not None and abs(axis) >= self.ndim:
|
| 1639 |
+
raise ValueError("abs(axis) must be less than ndim")
|
| 1640 |
+
|
| 1641 |
+
result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
|
| 1642 |
+
return self._wrap_reduction_result(axis, result)
|
| 1643 |
+
|
| 1644 |
+
def _mode(self, dropna: bool = True):
|
| 1645 |
+
mask = None
|
| 1646 |
+
if dropna:
|
| 1647 |
+
mask = self.isna()
|
| 1648 |
+
|
| 1649 |
+
i8modes = algorithms.mode(self.view("i8"), mask=mask)
|
| 1650 |
+
npmodes = i8modes.view(self._ndarray.dtype)
|
| 1651 |
+
npmodes = cast(np.ndarray, npmodes)
|
| 1652 |
+
return self._from_backing_data(npmodes)
|
| 1653 |
+
|
| 1654 |
+
    # ------------------------------------------------------------------
    # GroupBy Methods

    def _groupby_op(
        self,
        *,
        how: str,
        has_dropped_na: bool,
        min_count: int,
        ngroups: int,
        ids: npt.NDArray[np.intp],
        **kwargs,
    ):
        """
        Dispatch a groupby aggregation/transform on this datetimelike array
        to the shared Cython machinery (``WrappedCythonOp``).

        Parameters
        ----------
        how : str
            Name of the groupby operation, e.g. "min", "rank", "std".
        has_dropped_na : bool
            Whether the grouper dropped NA groups.
        min_count : int
            Minimum number of valid values required to produce a result.
        ngroups : int
            Number of groups.
        ids : np.ndarray[np.intp]
            Group label for each element of ``self``.
        **kwargs
            Forwarded to ``WrappedCythonOp._cython_op_ndim_compat``.

        Returns
        -------
        ndarray or ExtensionArray
            Raw ndarray for ops in ``op.cast_blocklist`` (i.e. "rank"),
            a ``TimedeltaArray`` for "std"/"sem", otherwise an array of
            the same type as ``self``.

        Raises
        ------
        TypeError
            If ``how`` is an additive/multiplicative op that is undefined
            for this dtype (see the per-dtype checks below).
        """
        dtype = self.dtype
        if dtype.kind == "M":
            # Adding/multiplying datetimes is not valid
            if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
                raise TypeError(f"datetime64 type does not support {how} operations")
            if how in ["any", "all"]:
                # GH#34479
                warnings.warn(
                    f"'{how}' with datetime64 dtypes is deprecated and will raise in a "
                    f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

        elif isinstance(dtype, PeriodDtype):
            # Adding/multiplying Periods is not valid
            if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
                raise TypeError(f"Period type does not support {how} operations")
            if how in ["any", "all"]:
                # GH#34479
                warnings.warn(
                    f"'{how}' with PeriodDtype is deprecated and will raise in a "
                    f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
        else:
            # timedeltas we can add but not multiply
            if how in ["prod", "cumprod", "skew", "var"]:
                raise TypeError(f"timedelta64 type does not support {how} operations")

        # All of the functions implemented here are ordinal, so we can
        # operate on the tz-naive equivalents
        npvalues = self._ndarray.view("M8[ns]")

        from pandas.core.groupby.ops import WrappedCythonOp

        kind = WrappedCythonOp.get_kind_from_how(how)
        op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)

        res_values = op._cython_op_ndim_compat(
            npvalues,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=ids,
            mask=None,
            **kwargs,
        )

        if op.how in op.cast_blocklist:
            # i.e. how in ["rank"], since other cast_blocklist methods don't go
            # through cython_operation
            return res_values

        # We did a view to M8[ns] above, now we go the other direction
        assert res_values.dtype == "M8[ns]"
        if how in ["std", "sem"]:
            # std/sem of datetimes are timedeltas, so re-wrap accordingly
            from pandas.core.arrays import TimedeltaArray

            if isinstance(self.dtype, PeriodDtype):
                raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
            self = cast("DatetimeArray | TimedeltaArray", self)
            new_dtype = f"m8[{self.unit}]"
            res_values = res_values.view(new_dtype)
            return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype)

        res_values = res_values.view(self._ndarray.dtype)
        return self._from_backing_data(res_values)
|
| 1736 |
+
|
| 1737 |
+
class DatelikeOps(DatetimeLikeArrayMixin):
    """
    Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
    """

    # The docstring below is %-formatted by @Substitution at class-definition
    # time (filling in %(URL)s), which is why literal percent signs in the
    # strftime directives are escaped as "%%".
    @Substitution(
        URL="https://docs.python.org/3/library/datetime.html"
        "#strftime-and-strptime-behavior"
    )
    def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
        """
        Convert to Index using specified date_format.

        Return an Index of formatted strings specified by date_format, which
        supports the same string format as the python standard library. Details
        of the string format can be found in `python string format
        doc <%(URL)s>`__.

        Formats supported by the C `strftime` API but not by the python string format
        doc (such as `"%%R"`, `"%%r"`) are not officially supported and should be
        preferably replaced with their supported equivalents (such as `"%%H:%%M"`,
        `"%%I:%%M:%%S %%p"`).

        Note that `PeriodIndex` support additional directives, detailed in
        `Period.strftime`.

        Parameters
        ----------
        date_format : str
            Date format string (e.g. "%%Y-%%m-%%d").

        Returns
        -------
        ndarray[object]
            NumPy ndarray of formatted strings.

        See Also
        --------
        to_datetime : Convert the given argument to datetime.
        DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
        DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
        DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
        Timestamp.strftime : Format a single Timestamp.
        Period.strftime : Format a single Period.

        Examples
        --------
        >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
        ...                     periods=3, freq='s')
        >>> rng.strftime('%%B %%d, %%Y, %%r')
        Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
               'March 10, 2018, 09:00:02 AM'],
              dtype='object')
        """
        # NaN (not NaT) as na_rep so the object-dtype result round-trips;
        # formatting itself is delegated to the subclass.
        result = self._format_native_types(date_format=date_format, na_rep=np.nan)
        return result.astype(object, copy=False)
|
| 1794 |
+
|
| 1795 |
+
# Shared docstring templates for TimelikeOps.round/floor/ceil below: each
# method formats {op} into _round_doc and appends its own example block via
# @Appender.  These strings are runtime-visible documentation; the example
# text must stay in sync with actual round/floor/ceil output.
_round_doc = """
Perform {op} operation on the data to the specified `freq`.

Parameters
----------
freq : str or Offset
    The frequency level to {op} the index to. Must be a fixed
    frequency like 'S' (second) not 'ME' (month end). See
    :ref:`frequency aliases <timeseries.offset_aliases>` for
    a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
    Only relevant for DatetimeIndex:

    - 'infer' will attempt to infer fall dst-transition hours based on
      order
    - bool-ndarray where True signifies a DST time, False designates
      a non-DST time (note that this flag is only applicable for
      ambiguous times)
    - 'NaT' will return NaT where there are ambiguous times
    - 'raise' will raise an AmbiguousTimeError if there are ambiguous
      times.

nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
    A nonexistent time does not exist in a particular timezone
    where clocks moved forward due to DST.

    - 'shift_forward' will shift the nonexistent time forward to the
      closest existing time
    - 'shift_backward' will shift the nonexistent time backward to the
      closest existing time
    - 'NaT' will return NaT where there are nonexistent times
    - timedelta objects will shift nonexistent times by the timedelta
    - 'raise' will raise an NonExistentTimeError if there are
      nonexistent times.

Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
    Index of the same type for a DatetimeIndex or TimedeltaIndex,
    or a Series with the same index for a Series.

Raises
------
ValueError if the `freq` cannot be converted.

Notes
-----
If the timestamps have a timezone, {op}ing will take place relative to the
local ("wall") time and re-localized to the same timezone. When {op}ing
near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
control the re-localization behavior.

Examples
--------
**DatetimeIndex**

>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
               '2018-01-01 12:01:00'],
              dtype='datetime64[ns]', freq='min')
"""

_round_example = """>>> rng.round('h')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
               '2018-01-01 12:00:00'],
              dtype='datetime64[ns]', freq=None)

**Series**

>>> pd.Series(rng).dt.round("h")
0   2018-01-01 12:00:00
1   2018-01-01 12:00:00
2   2018-01-01 12:00:00
dtype: datetime64[ns]

When rounding near a daylight savings time transition, use ``ambiguous`` or
``nonexistent`` to control how the timestamp should be re-localized.

>>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")

>>> rng_tz.floor("2h", ambiguous=False)
DatetimeIndex(['2021-10-31 02:00:00+01:00'],
             dtype='datetime64[ns, Europe/Amsterdam]', freq=None)

>>> rng_tz.floor("2h", ambiguous=True)
DatetimeIndex(['2021-10-31 02:00:00+02:00'],
             dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
"""

_floor_example = """>>> rng.floor('h')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
               '2018-01-01 12:00:00'],
              dtype='datetime64[ns]', freq=None)

**Series**

>>> pd.Series(rng).dt.floor("h")
0   2018-01-01 11:00:00
1   2018-01-01 12:00:00
2   2018-01-01 12:00:00
dtype: datetime64[ns]

When rounding near a daylight savings time transition, use ``ambiguous`` or
``nonexistent`` to control how the timestamp should be re-localized.

>>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")

>>> rng_tz.floor("2h", ambiguous=False)
DatetimeIndex(['2021-10-31 02:00:00+01:00'],
             dtype='datetime64[ns, Europe/Amsterdam]', freq=None)

>>> rng_tz.floor("2h", ambiguous=True)
DatetimeIndex(['2021-10-31 02:00:00+02:00'],
             dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
"""

_ceil_example = """>>> rng.ceil('h')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
               '2018-01-01 13:00:00'],
              dtype='datetime64[ns]', freq=None)

**Series**

>>> pd.Series(rng).dt.ceil("h")
0   2018-01-01 12:00:00
1   2018-01-01 12:00:00
2   2018-01-01 13:00:00
dtype: datetime64[ns]

When rounding near a daylight savings time transition, use ``ambiguous`` or
``nonexistent`` to control how the timestamp should be re-localized.

>>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam")

>>> rng_tz.ceil("h", ambiguous=False)
DatetimeIndex(['2021-10-31 02:00:00+01:00'],
             dtype='datetime64[ns, Europe/Amsterdam]', freq=None)

>>> rng_tz.ceil("h", ambiguous=True)
DatetimeIndex(['2021-10-31 02:00:00+02:00'],
             dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
"""
|
| 1938 |
+
|
| 1939 |
+
|
| 1940 |
+
class TimelikeOps(DatetimeLikeArrayMixin):
    """
    Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
    """

    # Set by subclasses: the numpy dtype used when none can be inferred
    # (presumably "M8[ns]" for DatetimeArray, "m8[ns]" for TimedeltaArray
    # -- confirm against the subclasses, which are outside this view).
    _default_dtype: np.dtype

    def __init__(
        self, values, dtype=None, freq=lib.no_default, copy: bool = False
    ) -> None:
        # Direct construction is deprecated (GH#55623); the supported entry
        # point is pd.array / the _from_sequence constructors.
        warnings.warn(
            # GH#55623
            f"{type(self).__name__}.__init__ is deprecated and will be "
            "removed in a future version. Use pd.array instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        if dtype is not None:
            dtype = pandas_dtype(dtype)

        values = extract_array(values, extract_numpy=True)
        if isinstance(values, IntegerArray):
            # NA entries become the iNaT sentinel in the int64 representation
            values = values.to_numpy("int64", na_value=iNaT)

        inferred_freq = getattr(values, "_freq", None)
        # Distinguish an explicit freq=None from "not passed" (lib.no_default)
        explicit_none = freq is None
        freq = freq if freq is not lib.no_default else None

        if isinstance(values, type(self)):
            if explicit_none:
                # don't inherit from values
                pass
            elif freq is None:
                freq = values.freq
            elif freq and values.freq:
                freq = to_offset(freq)
                freq = _validate_inferred_freq(freq, values.freq)

            if dtype is not None and dtype != values.dtype:
                # TODO: we only have tests for this for DTA, not TDA (2022-07-01)
                raise TypeError(
                    f"dtype={dtype} does not match data dtype {values.dtype}"
                )

            dtype = values.dtype
            values = values._ndarray

        elif dtype is None:
            if isinstance(values, np.ndarray) and values.dtype.kind in "Mm":
                dtype = values.dtype
            else:
                dtype = self._default_dtype
                if isinstance(values, np.ndarray) and values.dtype == "i8":
                    values = values.view(dtype)

        if not isinstance(values, np.ndarray):
            raise ValueError(
                f"Unexpected type '{type(values).__name__}'. 'values' must be a "
                f"{type(self).__name__}, ndarray, or Series or Index "
                "containing one of those."
            )
        if values.ndim not in [1, 2]:
            # NOTE: ndim == 2 is accepted (internal block-manager use) even
            # though the message only mentions 1-dimensional input.
            raise ValueError("Only 1-dimensional input arrays are supported.")

        if values.dtype == "i8":
            # for compat with datetime/timedelta/period shared methods,
            # we can sometimes get here with int64 values.  These represent
            # nanosecond UTC (or tz-naive) unix timestamps
            if dtype is None:
                dtype = self._default_dtype
                values = values.view(self._default_dtype)
            elif lib.is_np_dtype(dtype, "mM"):
                values = values.view(dtype)
            elif isinstance(dtype, DatetimeTZDtype):
                kind = self._default_dtype.kind
                new_dtype = f"{kind}8[{dtype.unit}]"
                values = values.view(new_dtype)

        dtype = self._validate_dtype(values, dtype)

        if freq == "infer":
            raise ValueError(
                f"Frequency inference not allowed in {type(self).__name__}.__init__. "
                "Use 'pd.array()' instead."
            )

        if copy:
            values = values.copy()
        if freq:
            freq = to_offset(freq)
            if values.dtype.kind == "m" and not isinstance(freq, Tick):
                raise TypeError("TimedeltaArray/Index freq must be a Tick")

        NDArrayBacked.__init__(self, values=values, dtype=dtype)
        self._freq = freq

        if inferred_freq is None and freq is not None:
            # Only validate a user-passed freq; an inherited/inferred one is
            # assumed consistent with the data already.
            type(self)._validate_frequency(self, freq)

    @classmethod
    def _validate_dtype(cls, values, dtype):
        # Abstract hook: subclasses check/normalize dtype for these values.
        raise AbstractMethodError(cls)

    @property
    def freq(self):
        """
        Return the frequency object if it is set, otherwise None.
        """
        return self._freq

    @freq.setter
    def freq(self, value) -> None:
        # Setting a non-None freq validates it against the existing values.
        if value is not None:
            value = to_offset(value)
            self._validate_frequency(self, value)
            if self.dtype.kind == "m" and not isinstance(value, Tick):
                raise TypeError("TimedeltaArray/Index freq must be a Tick")

        if self.ndim > 1:
            raise ValueError("Cannot set freq with ndim > 1")

        self._freq = value

    @final
    def _maybe_pin_freq(self, freq, validate_kwds: dict):
        """
        Constructor helper to pin the appropriate `freq` attribute.  Assumes
        that self._freq is currently set to any freq inferred in
        _from_sequence_not_strict.
        """
        if freq is None:
            # user explicitly passed None -> override any inferred_freq
            self._freq = None
        elif freq == "infer":
            # if self._freq is *not* None then we already inferred a freq
            # and there is nothing left to do
            if self._freq is None:
                # Set _freq directly to bypass duplicative _validate_frequency
                # check.
                self._freq = to_offset(self.inferred_freq)
        elif freq is lib.no_default:
            # user did not specify anything, keep inferred freq if the original
            # data had one, otherwise do nothing
            pass
        elif self._freq is None:
            # We cannot inherit a freq from the data, so we need to validate
            # the user-passed freq
            freq = to_offset(freq)
            type(self)._validate_frequency(self, freq, **validate_kwds)
            self._freq = freq
        else:
            # Otherwise we just need to check that the user-passed freq
            # doesn't conflict with the one we already have.
            freq = to_offset(freq)
            _validate_inferred_freq(freq, self._freq)

    @final
    @classmethod
    def _validate_frequency(cls, index, freq: BaseOffset, **kwargs):
        """
        Validate that a frequency is compatible with the values of a given
        Datetime Array/Index or Timedelta Array/Index

        Parameters
        ----------
        index : DatetimeIndex or TimedeltaIndex
            The index on which to determine if the given frequency is valid
        freq : DateOffset
            The frequency to validate
        """
        inferred = index.inferred_freq
        if index.size == 0 or inferred == freq.freqstr:
            return None

        try:
            # Regenerate the range from index[0] at the claimed freq and
            # compare element-wise.
            on_freq = cls._generate_range(
                start=index[0],
                end=None,
                periods=len(index),
                freq=freq,
                unit=index.unit,
                **kwargs,
            )
            if not np.array_equal(index.asi8, on_freq.asi8):
                raise ValueError
        except ValueError as err:
            if "non-fixed" in str(err):
                # non-fixed frequencies are not meaningful for timedelta64;
                # we retain that error message
                raise err
            # GH#11587 the main way this is reached is if the `np.array_equal`
            # check above is False. This can also be reached if index[0]
            # is `NaT`, in which case the call to `cls._generate_range` will
            # raise a ValueError, which we re-raise with a more targeted
            # message.
            raise ValueError(
                f"Inferred frequency {inferred} from passed values "
                f"does not conform to passed frequency {freq.freqstr}"
            ) from err

    @classmethod
    def _generate_range(
        cls, start, end, periods: int | None, freq, *args, **kwargs
    ) -> Self:
        # Abstract hook: subclasses build a regular range at the given freq.
        raise AbstractMethodError(cls)

    # --------------------------------------------------------------

    @cache_readonly
    def _creso(self) -> int:
        # Numpy resolution code (NPY_DATETIMEUNIT) of the backing dtype.
        return get_unit_from_dtype(self._ndarray.dtype)

    @cache_readonly
    def unit(self) -> str:
        # e.g. "ns", "us", "ms"
        # error: Argument 1 to "dtype_to_unit" has incompatible type
        # "ExtensionDtype"; expected "Union[DatetimeTZDtype, dtype[Any]]"
        return dtype_to_unit(self.dtype)  # type: ignore[arg-type]

    def as_unit(self, unit: str, round_ok: bool = True) -> Self:
        """
        Convert to the given resolution unit ("s", "ms", "us", or "ns"),
        raising on overflow and (if round_ok=False) on lossy conversion.
        """
        if unit not in ["s", "ms", "us", "ns"]:
            raise ValueError("Supported units are 's', 'ms', 'us', 'ns'")

        dtype = np.dtype(f"{self.dtype.kind}8[{unit}]")
        new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=round_ok)

        if isinstance(self.dtype, np.dtype):
            new_dtype = new_values.dtype
        else:
            # tz-aware: preserve the timezone in the new-unit dtype
            tz = cast("DatetimeArray", self).tz
            new_dtype = DatetimeTZDtype(tz=tz, unit=unit)

        # error: Unexpected keyword argument "freq" for "_simple_new" of
        # "NDArrayBacked"  [call-arg]
        return type(self)._simple_new(
            new_values, dtype=new_dtype, freq=self.freq  # type: ignore[call-arg]
        )

    # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta
    #  with the return type matching input type.  TypeVar?
    def _ensure_matching_resos(self, other):
        if self._creso != other._creso:
            # Just as with Timestamp/Timedelta, we cast to the higher resolution
            if self._creso < other._creso:
                self = self.as_unit(other.unit)
            else:
                other = other.as_unit(self.unit)
        return self, other

    # --------------------------------------------------------------

    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
        if (
            ufunc in [np.isnan, np.isinf, np.isfinite]
            and len(inputs) == 1
            and inputs[0] is self
        ):
            # numpy 1.18 changed isinf and isnan to not raise on dt64/td64
            return getattr(ufunc, method)(self._ndarray, **kwargs)

        return super().__array_ufunc__(ufunc, method, *inputs, **kwargs)

    def _round(self, freq, mode, ambiguous, nonexistent):
        # Shared implementation behind round/floor/ceil; `mode` selects the
        # RoundTo rounding mode.
        # round the local times
        if isinstance(self.dtype, DatetimeTZDtype):
            # operate on naive timestamps, then convert back to aware
            self = cast("DatetimeArray", self)
            naive = self.tz_localize(None)
            result = naive._round(freq, mode, ambiguous, nonexistent)
            return result.tz_localize(
                self.tz, ambiguous=ambiguous, nonexistent=nonexistent
            )

        values = self.view("i8")
        values = cast(np.ndarray, values)
        nanos = get_unit_for_round(freq, self._creso)
        if nanos == 0:
            # GH 52761
            return self.copy()
        result_i8 = round_nsint64(values, mode, nanos)
        result = self._maybe_mask_results(result_i8, fill_value=iNaT)
        result = result.view(self._ndarray.dtype)
        return self._simple_new(result, dtype=self.dtype)

    @Appender((_round_doc + _round_example).format(op="round"))
    def round(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)

    @Appender((_round_doc + _floor_example).format(op="floor"))
    def floor(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)

    @Appender((_round_doc + _ceil_example).format(op="ceil"))
    def ceil(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)

    # --------------------------------------------------------------
    # Reductions

    def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
        # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
        return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())

    def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
        # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype

        return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())

    # --------------------------------------------------------------
    # Frequency Methods

    def _maybe_clear_freq(self) -> None:
        # Called when an operation invalidates any previously-pinned freq.
        self._freq = None

    def _with_freq(self, freq) -> Self:
        """
        Helper to get a view on the same data, with a new freq.

        Parameters
        ----------
        freq : DateOffset, None, or "infer"

        Returns
        -------
        Same type as self
        """
        # GH#29843
        if freq is None:
            # Always valid
            pass
        elif len(self) == 0 and isinstance(freq, BaseOffset):
            # Always valid.  In the TimedeltaArray case, we require a Tick offset
            if self.dtype.kind == "m" and not isinstance(freq, Tick):
                raise TypeError("TimedeltaArray/Index freq must be a Tick")
        else:
            # As an internal method, we can ensure this assertion always holds
            assert freq == "infer"
            freq = to_offset(self.inferred_freq)

        arr = self.view()
        arr._freq = freq
        return arr

    # --------------------------------------------------------------
    # ExtensionArray Interface

    def _values_for_json(self) -> np.ndarray:
        # Small performance bump vs the base class which calls np.asarray(self)
        if isinstance(self.dtype, np.dtype):
            return self._ndarray
        return super()._values_for_json()

    def factorize(
        self,
        use_na_sentinel: bool = True,
        sort: bool = False,
    ):
        if self.freq is not None:
            # We must be unique, so can short-circuit (and retain freq)
            codes = np.arange(len(self), dtype=np.intp)
            uniques = self.copy()  # TODO: copy or view?
            if sort and self.freq.n < 0:
                # Negative-step freq means the data is descending; reverse
                # both so uniques come out sorted.
                codes = codes[::-1]
                uniques = uniques[::-1]
            return codes, uniques

        if sort:
            # algorithms.factorize only passes sort=True here when freq is
            # not None, so this should not be reached.
            raise NotImplementedError(
                f"The 'sort' keyword in {type(self).__name__}.factorize is "
                "ignored unless arr.freq is not None. To factorize with sort, "
                "call pd.factorize(obj, sort=True) instead."
            )
        return super().factorize(use_na_sentinel=use_na_sentinel)

    @classmethod
    def _concat_same_type(
        cls,
        to_concat: Sequence[Self],
        axis: AxisInt = 0,
    ) -> Self:
        new_obj = super()._concat_same_type(to_concat, axis)

        obj = to_concat[0]

        if axis == 0:
            # GH 3232: If the concat result is evenly spaced, we can retain the
            # original frequency
            to_concat = [x for x in to_concat if len(x)]

            if obj.freq is not None and all(x.freq == obj.freq for x in to_concat):
                # Each piece must start exactly one freq-step after the
                # previous piece ends for the result to keep the freq.
                pairs = zip(to_concat[:-1], to_concat[1:])
                if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs):
                    new_freq = obj.freq
                    new_obj._freq = new_freq
        return new_obj

    def copy(self, order: str = "C") -> Self:
        new_obj = super().copy(order=order)
        # Base-class copy does not carry _freq; propagate it explicitly.
        new_obj._freq = self.freq
        return new_obj

    def interpolate(
        self,
        *,
        method: InterpolateOptions,
        axis: int,
        index: Index,
        limit,
        limit_direction,
        limit_area,
        copy: bool,
        **kwargs,
    ) -> Self:
        """
        See NDFrame.interpolate.__doc__.
        """
        # NB: we return type(self) even if copy=False
        if method != "linear":
            raise NotImplementedError

        if not copy:
            # Interpolate in place on our own backing array.
            out_data = self._ndarray
        else:
            out_data = self._ndarray.copy()

        missing.interpolate_2d_inplace(
            out_data,
            method=method,
            axis=axis,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            **kwargs,
        )
        if not copy:
            return self
        return type(self)._simple_new(out_data, dtype=self.dtype)

    # --------------------------------------------------------------
    # Unsorted

    @property
    def _is_dates_only(self) -> bool:
        """
        Check if we are round times at midnight (and no timezone), which will
        be given a more compact __repr__ than other cases. For TimedeltaArray
        we are checking for multiples of 24H.
        """
        if not lib.is_np_dtype(self.dtype):
            # i.e. we have a timezone
            return False

        values_int = self.asi8
        consider_values = values_int != iNaT
        reso = get_unit_from_dtype(self.dtype)
        ppd = periods_per_day(reso)

        # TODO: can we reuse is_date_array_normalized?  would need a skipna kwd
        #  (first attempt at this was less performant than this implementation)
        even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0
        return even_days
|
| 2420 |
+
|
| 2421 |
+
# -------------------------------------------------------------------
|
| 2422 |
+
# Shared Constructor Helpers
|
| 2423 |
+
|
| 2424 |
+
|
| 2425 |
+
def ensure_arraylike_for_datetimelike(
    data, copy: bool, cls_name: str
) -> tuple[ArrayLike, bool]:
    """
    Coerce `data` into an ndarray or ExtensionArray suitable for the
    Datetime/Timedelta/Period constructors.

    Parameters
    ----------
    data : list-like
        Raw input; may be a list/tuple/generator, ndarray, pandas array,
        Index/Series, Categorical, or Arrow-backed array.
    copy : bool
        Caller's copy flag; returned as False whenever this function has
        already materialized a new array (so the caller need not copy again).
    cls_name : str
        Name of the calling class, used only in the MultiIndex error message.

    Returns
    -------
    tuple[ArrayLike, bool]
        The coerced data and the possibly-downgraded copy flag.

    Raises
    ------
    TypeError
        If `data` is a MultiIndex.
    """
    if not hasattr(data, "dtype"):
        # e.g. list, tuple
        if not isinstance(data, (list, tuple)) and np.ndim(data) == 0:
            # i.e. generator
            data = list(data)

        data = construct_1d_object_array_from_listlike(data)
        copy = False
    elif isinstance(data, ABCMultiIndex):
        raise TypeError(f"Cannot create a {cls_name} from a MultiIndex.")
    else:
        data = extract_array(data, extract_numpy=True)

    if isinstance(data, IntegerArray) or (
        isinstance(data, ArrowExtensionArray) and data.dtype.kind in "iu"
    ):
        # Masked/Arrow integers: NA entries become the iNaT sentinel.
        data = data.to_numpy("int64", na_value=iNaT)
        copy = False
    elif isinstance(data, ArrowExtensionArray):
        data = data._maybe_convert_datelike_array()
        data = data.to_numpy()
        copy = False
    elif not isinstance(data, (np.ndarray, ExtensionArray)):
        # GH#24539 e.g. xarray, dask object
        data = np.asarray(data)

    elif isinstance(data, ABCCategorical):
        # GH#18664 preserve tz in going DTI->Categorical->DTI
        # TODO: cases where we need to do another pass through maybe_convert_dtype,
        #  e.g. the categories are timedelta64s
        data = data.categories.take(data.codes, fill_value=NaT)._values
        copy = False

    return data, copy
|
| 2463 |
+
|
| 2464 |
+
@overload
def validate_periods(periods: None) -> None:
    ...


@overload
def validate_periods(periods: int | float) -> int:
    ...


def validate_periods(periods: int | float | None) -> int | None:
    """
    If a `periods` argument is passed to the Datetime/Timedelta Array/Index
    constructor, cast it to an integer.

    Parameters
    ----------
    periods : None, float, int

    Returns
    -------
    periods : None or int

    Raises
    ------
    TypeError
        If periods is not None and is neither a float nor an integer.
    """
    if periods is not None:
        if lib.is_float(periods):
            # Floats are accepted for now but deprecated (GH#56036);
            # int() truncates toward zero.
            warnings.warn(
                # GH#56036
                "Non-integer 'periods' in pd.date_range, pd.timedelta_range, "
                "pd.period_range, and pd.interval_range are deprecated and "
                "will raise in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            periods = int(periods)
        elif not lib.is_integer(periods):
            raise TypeError(f"periods must be a number, got {periods}")
    return periods
|
| 2506 |
+
|
| 2507 |
+
|
| 2508 |
+
def _validate_inferred_freq(
|
| 2509 |
+
freq: BaseOffset | None, inferred_freq: BaseOffset | None
|
| 2510 |
+
) -> BaseOffset | None:
|
| 2511 |
+
"""
|
| 2512 |
+
If the user passes a freq and another freq is inferred from passed data,
|
| 2513 |
+
require that they match.
|
| 2514 |
+
|
| 2515 |
+
Parameters
|
| 2516 |
+
----------
|
| 2517 |
+
freq : DateOffset or None
|
| 2518 |
+
inferred_freq : DateOffset or None
|
| 2519 |
+
|
| 2520 |
+
Returns
|
| 2521 |
+
-------
|
| 2522 |
+
freq : DateOffset or None
|
| 2523 |
+
"""
|
| 2524 |
+
if inferred_freq is not None:
|
| 2525 |
+
if freq is not None and freq != inferred_freq:
|
| 2526 |
+
raise ValueError(
|
| 2527 |
+
f"Inferred frequency {inferred_freq} from passed "
|
| 2528 |
+
"values does not conform to passed frequency "
|
| 2529 |
+
f"{freq.freqstr}"
|
| 2530 |
+
)
|
| 2531 |
+
if freq is None:
|
| 2532 |
+
freq = inferred_freq
|
| 2533 |
+
|
| 2534 |
+
return freq
|
| 2535 |
+
|
| 2536 |
+
|
| 2537 |
+
def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype | ArrowDtype) -> str:
|
| 2538 |
+
"""
|
| 2539 |
+
Return the unit str corresponding to the dtype's resolution.
|
| 2540 |
+
|
| 2541 |
+
Parameters
|
| 2542 |
+
----------
|
| 2543 |
+
dtype : DatetimeTZDtype or np.dtype
|
| 2544 |
+
If np.dtype, we assume it is a datetime64 dtype.
|
| 2545 |
+
|
| 2546 |
+
Returns
|
| 2547 |
+
-------
|
| 2548 |
+
str
|
| 2549 |
+
"""
|
| 2550 |
+
if isinstance(dtype, DatetimeTZDtype):
|
| 2551 |
+
return dtype.unit
|
| 2552 |
+
elif isinstance(dtype, ArrowDtype):
|
| 2553 |
+
if dtype.kind not in "mM":
|
| 2554 |
+
raise ValueError(f"{dtype=} does not have a resolution.")
|
| 2555 |
+
return dtype.pyarrow_dtype.unit
|
| 2556 |
+
return np.datetime_data(dtype)[0]
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/integer.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import ClassVar
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from pandas.core.dtypes.base import register_extension_dtype
|
| 8 |
+
from pandas.core.dtypes.common import is_integer_dtype
|
| 9 |
+
|
| 10 |
+
from pandas.core.arrays.numeric import (
|
| 11 |
+
NumericArray,
|
| 12 |
+
NumericDtype,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class IntegerDtype(NumericDtype):
|
| 17 |
+
"""
|
| 18 |
+
An ExtensionDtype to hold a single size & kind of integer dtype.
|
| 19 |
+
|
| 20 |
+
These specific implementations are subclasses of the non-public
|
| 21 |
+
IntegerDtype. For example, we have Int8Dtype to represent signed int 8s.
|
| 22 |
+
|
| 23 |
+
The attributes name & type are set when these subclasses are created.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
_default_np_dtype = np.dtype(np.int64)
|
| 27 |
+
_checker = is_integer_dtype
|
| 28 |
+
|
| 29 |
+
@classmethod
|
| 30 |
+
def construct_array_type(cls) -> type[IntegerArray]:
|
| 31 |
+
"""
|
| 32 |
+
Return the array type associated with this dtype.
|
| 33 |
+
|
| 34 |
+
Returns
|
| 35 |
+
-------
|
| 36 |
+
type
|
| 37 |
+
"""
|
| 38 |
+
return IntegerArray
|
| 39 |
+
|
| 40 |
+
@classmethod
|
| 41 |
+
def _get_dtype_mapping(cls) -> dict[np.dtype, IntegerDtype]:
|
| 42 |
+
return NUMPY_INT_TO_DTYPE
|
| 43 |
+
|
| 44 |
+
@classmethod
|
| 45 |
+
def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
|
| 46 |
+
"""
|
| 47 |
+
Safely cast the values to the given dtype.
|
| 48 |
+
|
| 49 |
+
"safe" in this context means the casting is lossless. e.g. if 'values'
|
| 50 |
+
has a floating dtype, each value must be an integer.
|
| 51 |
+
"""
|
| 52 |
+
try:
|
| 53 |
+
return values.astype(dtype, casting="safe", copy=copy)
|
| 54 |
+
except TypeError as err:
|
| 55 |
+
casted = values.astype(dtype, copy=copy)
|
| 56 |
+
if (casted == values).all():
|
| 57 |
+
return casted
|
| 58 |
+
|
| 59 |
+
raise TypeError(
|
| 60 |
+
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
|
| 61 |
+
) from err
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class IntegerArray(NumericArray):
|
| 65 |
+
"""
|
| 66 |
+
Array of integer (optional missing) values.
|
| 67 |
+
|
| 68 |
+
Uses :attr:`pandas.NA` as the missing value.
|
| 69 |
+
|
| 70 |
+
.. warning::
|
| 71 |
+
|
| 72 |
+
IntegerArray is currently experimental, and its API or internal
|
| 73 |
+
implementation may change without warning.
|
| 74 |
+
|
| 75 |
+
We represent an IntegerArray with 2 numpy arrays:
|
| 76 |
+
|
| 77 |
+
- data: contains a numpy integer array of the appropriate dtype
|
| 78 |
+
- mask: a boolean array holding a mask on the data, True is missing
|
| 79 |
+
|
| 80 |
+
To construct an IntegerArray from generic array-like input, use
|
| 81 |
+
:func:`pandas.array` with one of the integer dtypes (see examples).
|
| 82 |
+
|
| 83 |
+
See :ref:`integer_na` for more.
|
| 84 |
+
|
| 85 |
+
Parameters
|
| 86 |
+
----------
|
| 87 |
+
values : numpy.ndarray
|
| 88 |
+
A 1-d integer-dtype array.
|
| 89 |
+
mask : numpy.ndarray
|
| 90 |
+
A 1-d boolean-dtype array indicating missing values.
|
| 91 |
+
copy : bool, default False
|
| 92 |
+
Whether to copy the `values` and `mask`.
|
| 93 |
+
|
| 94 |
+
Attributes
|
| 95 |
+
----------
|
| 96 |
+
None
|
| 97 |
+
|
| 98 |
+
Methods
|
| 99 |
+
-------
|
| 100 |
+
None
|
| 101 |
+
|
| 102 |
+
Returns
|
| 103 |
+
-------
|
| 104 |
+
IntegerArray
|
| 105 |
+
|
| 106 |
+
Examples
|
| 107 |
+
--------
|
| 108 |
+
Create an IntegerArray with :func:`pandas.array`.
|
| 109 |
+
|
| 110 |
+
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
|
| 111 |
+
>>> int_array
|
| 112 |
+
<IntegerArray>
|
| 113 |
+
[1, <NA>, 3]
|
| 114 |
+
Length: 3, dtype: Int32
|
| 115 |
+
|
| 116 |
+
String aliases for the dtypes are also available. They are capitalized.
|
| 117 |
+
|
| 118 |
+
>>> pd.array([1, None, 3], dtype='Int32')
|
| 119 |
+
<IntegerArray>
|
| 120 |
+
[1, <NA>, 3]
|
| 121 |
+
Length: 3, dtype: Int32
|
| 122 |
+
|
| 123 |
+
>>> pd.array([1, None, 3], dtype='UInt16')
|
| 124 |
+
<IntegerArray>
|
| 125 |
+
[1, <NA>, 3]
|
| 126 |
+
Length: 3, dtype: UInt16
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
_dtype_cls = IntegerDtype
|
| 130 |
+
|
| 131 |
+
# The value used to fill '_data' to avoid upcasting
|
| 132 |
+
_internal_fill_value = 1
|
| 133 |
+
# Fill values used for any/all
|
| 134 |
+
# Incompatible types in assignment (expression has type "int", base class
|
| 135 |
+
# "BaseMaskedArray" defined the type as "<typing special form>")
|
| 136 |
+
_truthy_value = 1 # type: ignore[assignment]
|
| 137 |
+
_falsey_value = 0 # type: ignore[assignment]
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
_dtype_docstring = """
|
| 141 |
+
An ExtensionDtype for {dtype} integer data.
|
| 142 |
+
|
| 143 |
+
Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
|
| 144 |
+
|
| 145 |
+
Attributes
|
| 146 |
+
----------
|
| 147 |
+
None
|
| 148 |
+
|
| 149 |
+
Methods
|
| 150 |
+
-------
|
| 151 |
+
None
|
| 152 |
+
|
| 153 |
+
Examples
|
| 154 |
+
--------
|
| 155 |
+
For Int8Dtype:
|
| 156 |
+
|
| 157 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
|
| 158 |
+
>>> ser.dtype
|
| 159 |
+
Int8Dtype()
|
| 160 |
+
|
| 161 |
+
For Int16Dtype:
|
| 162 |
+
|
| 163 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
|
| 164 |
+
>>> ser.dtype
|
| 165 |
+
Int16Dtype()
|
| 166 |
+
|
| 167 |
+
For Int32Dtype:
|
| 168 |
+
|
| 169 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
|
| 170 |
+
>>> ser.dtype
|
| 171 |
+
Int32Dtype()
|
| 172 |
+
|
| 173 |
+
For Int64Dtype:
|
| 174 |
+
|
| 175 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
|
| 176 |
+
>>> ser.dtype
|
| 177 |
+
Int64Dtype()
|
| 178 |
+
|
| 179 |
+
For UInt8Dtype:
|
| 180 |
+
|
| 181 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
|
| 182 |
+
>>> ser.dtype
|
| 183 |
+
UInt8Dtype()
|
| 184 |
+
|
| 185 |
+
For UInt16Dtype:
|
| 186 |
+
|
| 187 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
|
| 188 |
+
>>> ser.dtype
|
| 189 |
+
UInt16Dtype()
|
| 190 |
+
|
| 191 |
+
For UInt32Dtype:
|
| 192 |
+
|
| 193 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
|
| 194 |
+
>>> ser.dtype
|
| 195 |
+
UInt32Dtype()
|
| 196 |
+
|
| 197 |
+
For UInt64Dtype:
|
| 198 |
+
|
| 199 |
+
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
|
| 200 |
+
>>> ser.dtype
|
| 201 |
+
UInt64Dtype()
|
| 202 |
+
"""
|
| 203 |
+
|
| 204 |
+
# create the Dtype
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
@register_extension_dtype
|
| 208 |
+
class Int8Dtype(IntegerDtype):
|
| 209 |
+
type = np.int8
|
| 210 |
+
name: ClassVar[str] = "Int8"
|
| 211 |
+
__doc__ = _dtype_docstring.format(dtype="int8")
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
@register_extension_dtype
|
| 215 |
+
class Int16Dtype(IntegerDtype):
|
| 216 |
+
type = np.int16
|
| 217 |
+
name: ClassVar[str] = "Int16"
|
| 218 |
+
__doc__ = _dtype_docstring.format(dtype="int16")
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
@register_extension_dtype
|
| 222 |
+
class Int32Dtype(IntegerDtype):
|
| 223 |
+
type = np.int32
|
| 224 |
+
name: ClassVar[str] = "Int32"
|
| 225 |
+
__doc__ = _dtype_docstring.format(dtype="int32")
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
@register_extension_dtype
|
| 229 |
+
class Int64Dtype(IntegerDtype):
|
| 230 |
+
type = np.int64
|
| 231 |
+
name: ClassVar[str] = "Int64"
|
| 232 |
+
__doc__ = _dtype_docstring.format(dtype="int64")
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
@register_extension_dtype
|
| 236 |
+
class UInt8Dtype(IntegerDtype):
|
| 237 |
+
type = np.uint8
|
| 238 |
+
name: ClassVar[str] = "UInt8"
|
| 239 |
+
__doc__ = _dtype_docstring.format(dtype="uint8")
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
@register_extension_dtype
|
| 243 |
+
class UInt16Dtype(IntegerDtype):
|
| 244 |
+
type = np.uint16
|
| 245 |
+
name: ClassVar[str] = "UInt16"
|
| 246 |
+
__doc__ = _dtype_docstring.format(dtype="uint16")
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@register_extension_dtype
|
| 250 |
+
class UInt32Dtype(IntegerDtype):
|
| 251 |
+
type = np.uint32
|
| 252 |
+
name: ClassVar[str] = "UInt32"
|
| 253 |
+
__doc__ = _dtype_docstring.format(dtype="uint32")
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
@register_extension_dtype
|
| 257 |
+
class UInt64Dtype(IntegerDtype):
|
| 258 |
+
type = np.uint64
|
| 259 |
+
name: ClassVar[str] = "UInt64"
|
| 260 |
+
__doc__ = _dtype_docstring.format(dtype="uint64")
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
NUMPY_INT_TO_DTYPE: dict[np.dtype, IntegerDtype] = {
|
| 264 |
+
np.dtype(np.int8): Int8Dtype(),
|
| 265 |
+
np.dtype(np.int16): Int16Dtype(),
|
| 266 |
+
np.dtype(np.int32): Int32Dtype(),
|
| 267 |
+
np.dtype(np.int64): Int64Dtype(),
|
| 268 |
+
np.dtype(np.uint8): UInt8Dtype(),
|
| 269 |
+
np.dtype(np.uint16): UInt16Dtype(),
|
| 270 |
+
np.dtype(np.uint32): UInt32Dtype(),
|
| 271 |
+
np.dtype(np.uint64): UInt64Dtype(),
|
| 272 |
+
}
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/masked.py
ADDED
|
@@ -0,0 +1,1650 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import (
|
| 4 |
+
TYPE_CHECKING,
|
| 5 |
+
Any,
|
| 6 |
+
Callable,
|
| 7 |
+
Literal,
|
| 8 |
+
overload,
|
| 9 |
+
)
|
| 10 |
+
import warnings
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from pandas._libs import (
|
| 15 |
+
lib,
|
| 16 |
+
missing as libmissing,
|
| 17 |
+
)
|
| 18 |
+
from pandas._libs.tslibs import is_supported_dtype
|
| 19 |
+
from pandas._typing import (
|
| 20 |
+
ArrayLike,
|
| 21 |
+
AstypeArg,
|
| 22 |
+
AxisInt,
|
| 23 |
+
DtypeObj,
|
| 24 |
+
FillnaOptions,
|
| 25 |
+
InterpolateOptions,
|
| 26 |
+
NpDtype,
|
| 27 |
+
PositionalIndexer,
|
| 28 |
+
Scalar,
|
| 29 |
+
ScalarIndexer,
|
| 30 |
+
Self,
|
| 31 |
+
SequenceIndexer,
|
| 32 |
+
Shape,
|
| 33 |
+
npt,
|
| 34 |
+
)
|
| 35 |
+
from pandas.compat import (
|
| 36 |
+
IS64,
|
| 37 |
+
is_platform_windows,
|
| 38 |
+
)
|
| 39 |
+
from pandas.errors import AbstractMethodError
|
| 40 |
+
from pandas.util._decorators import doc
|
| 41 |
+
from pandas.util._validators import validate_fillna_kwargs
|
| 42 |
+
|
| 43 |
+
from pandas.core.dtypes.base import ExtensionDtype
|
| 44 |
+
from pandas.core.dtypes.common import (
|
| 45 |
+
is_bool,
|
| 46 |
+
is_integer_dtype,
|
| 47 |
+
is_list_like,
|
| 48 |
+
is_scalar,
|
| 49 |
+
is_string_dtype,
|
| 50 |
+
pandas_dtype,
|
| 51 |
+
)
|
| 52 |
+
from pandas.core.dtypes.dtypes import BaseMaskedDtype
|
| 53 |
+
from pandas.core.dtypes.missing import (
|
| 54 |
+
array_equivalent,
|
| 55 |
+
is_valid_na_for_dtype,
|
| 56 |
+
isna,
|
| 57 |
+
notna,
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
from pandas.core import (
|
| 61 |
+
algorithms as algos,
|
| 62 |
+
arraylike,
|
| 63 |
+
missing,
|
| 64 |
+
nanops,
|
| 65 |
+
ops,
|
| 66 |
+
)
|
| 67 |
+
from pandas.core.algorithms import (
|
| 68 |
+
factorize_array,
|
| 69 |
+
isin,
|
| 70 |
+
map_array,
|
| 71 |
+
mode,
|
| 72 |
+
take,
|
| 73 |
+
)
|
| 74 |
+
from pandas.core.array_algos import (
|
| 75 |
+
masked_accumulations,
|
| 76 |
+
masked_reductions,
|
| 77 |
+
)
|
| 78 |
+
from pandas.core.array_algos.quantile import quantile_with_mask
|
| 79 |
+
from pandas.core.arraylike import OpsMixin
|
| 80 |
+
from pandas.core.arrays._utils import to_numpy_dtype_inference
|
| 81 |
+
from pandas.core.arrays.base import ExtensionArray
|
| 82 |
+
from pandas.core.construction import (
|
| 83 |
+
array as pd_array,
|
| 84 |
+
ensure_wrapped_if_datetimelike,
|
| 85 |
+
extract_array,
|
| 86 |
+
)
|
| 87 |
+
from pandas.core.indexers import check_array_indexer
|
| 88 |
+
from pandas.core.ops import invalid_comparison
|
| 89 |
+
from pandas.core.util.hashing import hash_array
|
| 90 |
+
|
| 91 |
+
if TYPE_CHECKING:
|
| 92 |
+
from collections.abc import (
|
| 93 |
+
Iterator,
|
| 94 |
+
Sequence,
|
| 95 |
+
)
|
| 96 |
+
from pandas import Series
|
| 97 |
+
from pandas.core.arrays import BooleanArray
|
| 98 |
+
from pandas._typing import (
|
| 99 |
+
NumpySorter,
|
| 100 |
+
NumpyValueArrayLike,
|
| 101 |
+
)
|
| 102 |
+
from pandas.core.arrays import FloatingArray
|
| 103 |
+
|
| 104 |
+
from pandas.compat.numpy import function as nv
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class BaseMaskedArray(OpsMixin, ExtensionArray):
|
| 108 |
+
"""
|
| 109 |
+
Base class for masked arrays (which use _data and _mask to store the data).
|
| 110 |
+
|
| 111 |
+
numpy based
|
| 112 |
+
"""
|
| 113 |
+
|
| 114 |
+
# The value used to fill '_data' to avoid upcasting
|
| 115 |
+
_internal_fill_value: Scalar
|
| 116 |
+
# our underlying data and mask are each ndarrays
|
| 117 |
+
_data: np.ndarray
|
| 118 |
+
_mask: npt.NDArray[np.bool_]
|
| 119 |
+
|
| 120 |
+
# Fill values used for any/all
|
| 121 |
+
_truthy_value = Scalar # bool(_truthy_value) = True
|
| 122 |
+
_falsey_value = Scalar # bool(_falsey_value) = False
|
| 123 |
+
|
| 124 |
+
@classmethod
|
| 125 |
+
def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
|
| 126 |
+
result = BaseMaskedArray.__new__(cls)
|
| 127 |
+
result._data = values
|
| 128 |
+
result._mask = mask
|
| 129 |
+
return result
|
| 130 |
+
|
| 131 |
+
def __init__(
|
| 132 |
+
self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
|
| 133 |
+
) -> None:
|
| 134 |
+
# values is supposed to already be validated in the subclass
|
| 135 |
+
if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_):
|
| 136 |
+
raise TypeError(
|
| 137 |
+
"mask should be boolean numpy array. Use "
|
| 138 |
+
"the 'pd.array' function instead"
|
| 139 |
+
)
|
| 140 |
+
if values.shape != mask.shape:
|
| 141 |
+
raise ValueError("values.shape must match mask.shape")
|
| 142 |
+
|
| 143 |
+
if copy:
|
| 144 |
+
values = values.copy()
|
| 145 |
+
mask = mask.copy()
|
| 146 |
+
|
| 147 |
+
self._data = values
|
| 148 |
+
self._mask = mask
|
| 149 |
+
|
| 150 |
+
@classmethod
|
| 151 |
+
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self:
|
| 152 |
+
values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy)
|
| 153 |
+
return cls(values, mask)
|
| 154 |
+
|
| 155 |
+
@classmethod
|
| 156 |
+
@doc(ExtensionArray._empty)
|
| 157 |
+
def _empty(cls, shape: Shape, dtype: ExtensionDtype):
|
| 158 |
+
values = np.empty(shape, dtype=dtype.type)
|
| 159 |
+
values.fill(cls._internal_fill_value)
|
| 160 |
+
mask = np.ones(shape, dtype=bool)
|
| 161 |
+
result = cls(values, mask)
|
| 162 |
+
if not isinstance(result, cls) or dtype != result.dtype:
|
| 163 |
+
raise NotImplementedError(
|
| 164 |
+
f"Default 'empty' implementation is invalid for dtype='{dtype}'"
|
| 165 |
+
)
|
| 166 |
+
return result
|
| 167 |
+
|
| 168 |
+
def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
|
| 169 |
+
# NEP 51: https://github.com/numpy/numpy/pull/22449
|
| 170 |
+
return str
|
| 171 |
+
|
| 172 |
+
@property
|
| 173 |
+
def dtype(self) -> BaseMaskedDtype:
|
| 174 |
+
raise AbstractMethodError(self)
|
| 175 |
+
|
| 176 |
+
@overload
|
| 177 |
+
def __getitem__(self, item: ScalarIndexer) -> Any:
|
| 178 |
+
...
|
| 179 |
+
|
| 180 |
+
@overload
|
| 181 |
+
def __getitem__(self, item: SequenceIndexer) -> Self:
|
| 182 |
+
...
|
| 183 |
+
|
| 184 |
+
def __getitem__(self, item: PositionalIndexer) -> Self | Any:
|
| 185 |
+
item = check_array_indexer(self, item)
|
| 186 |
+
|
| 187 |
+
newmask = self._mask[item]
|
| 188 |
+
if is_bool(newmask):
|
| 189 |
+
# This is a scalar indexing
|
| 190 |
+
if newmask:
|
| 191 |
+
return self.dtype.na_value
|
| 192 |
+
return self._data[item]
|
| 193 |
+
|
| 194 |
+
return self._simple_new(self._data[item], newmask)
|
| 195 |
+
|
| 196 |
+
def _pad_or_backfill(
|
| 197 |
+
self,
|
| 198 |
+
*,
|
| 199 |
+
method: FillnaOptions,
|
| 200 |
+
limit: int | None = None,
|
| 201 |
+
limit_area: Literal["inside", "outside"] | None = None,
|
| 202 |
+
copy: bool = True,
|
| 203 |
+
) -> Self:
|
| 204 |
+
mask = self._mask
|
| 205 |
+
|
| 206 |
+
if mask.any():
|
| 207 |
+
func = missing.get_fill_func(method, ndim=self.ndim)
|
| 208 |
+
|
| 209 |
+
npvalues = self._data.T
|
| 210 |
+
new_mask = mask.T
|
| 211 |
+
if copy:
|
| 212 |
+
npvalues = npvalues.copy()
|
| 213 |
+
new_mask = new_mask.copy()
|
| 214 |
+
elif limit_area is not None:
|
| 215 |
+
mask = mask.copy()
|
| 216 |
+
func(npvalues, limit=limit, mask=new_mask)
|
| 217 |
+
|
| 218 |
+
if limit_area is not None and not mask.all():
|
| 219 |
+
mask = mask.T
|
| 220 |
+
neg_mask = ~mask
|
| 221 |
+
first = neg_mask.argmax()
|
| 222 |
+
last = len(neg_mask) - neg_mask[::-1].argmax() - 1
|
| 223 |
+
if limit_area == "inside":
|
| 224 |
+
new_mask[:first] |= mask[:first]
|
| 225 |
+
new_mask[last + 1 :] |= mask[last + 1 :]
|
| 226 |
+
elif limit_area == "outside":
|
| 227 |
+
new_mask[first + 1 : last] |= mask[first + 1 : last]
|
| 228 |
+
|
| 229 |
+
if copy:
|
| 230 |
+
return self._simple_new(npvalues.T, new_mask.T)
|
| 231 |
+
else:
|
| 232 |
+
return self
|
| 233 |
+
else:
|
| 234 |
+
if copy:
|
| 235 |
+
new_values = self.copy()
|
| 236 |
+
else:
|
| 237 |
+
new_values = self
|
| 238 |
+
return new_values
|
| 239 |
+
|
| 240 |
+
@doc(ExtensionArray.fillna)
def fillna(
    self, value=None, method=None, limit: int | None = None, copy: bool = True
) -> Self:
    # Replace NA positions either with an explicit `value` or by
    # propagating neighbors via `method` (mutually exclusive, validated here).
    value, method = validate_fillna_kwargs(value, method)

    mask = self._mask

    # broadcast/validate array-like `value` against our length
    value = missing.check_value_size(value, mask, len(self))

    if mask.any():
        if method is not None:
            func = missing.get_fill_func(method, ndim=self.ndim)
            npvalues = self._data.T
            new_mask = mask.T
            if copy:
                npvalues = npvalues.copy()
                new_mask = new_mask.copy()
            # fills npvalues/new_mask in place
            func(npvalues, limit=limit, mask=new_mask)
            return self._simple_new(npvalues.T, new_mask.T)
        else:
            # fill with value
            if copy:
                new_values = self.copy()
            else:
                new_values = self[:]
            new_values[mask] = value
    else:
        # no NAs; honor the copy keyword
        if copy:
            new_values = self.copy()
        else:
            new_values = self[:]
    return new_values
@classmethod
def _coerce_to_array(
    cls, values, *, dtype: DtypeObj, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
    """
    Coerce input to a (data, mask) ndarray pair.

    Abstract: subclasses (IntegerArray, FloatingArray, BooleanArray)
    must implement the dtype-specific coercion.
    """
    raise AbstractMethodError(cls)
def _validate_setitem_value(self, value):
    """
    Check if we have a scalar that we can cast losslessly.

    Raises
    ------
    TypeError
        If `value` cannot be stored in this array's dtype without loss.
    """
    kind = self.dtype.kind
    # TODO: get this all from np_can_hold_element?
    if kind == "b":
        # boolean dtype only accepts real bools
        if lib.is_bool(value):
            return value

    elif kind == "f":
        # float dtype accepts ints and floats
        if lib.is_integer(value) or lib.is_float(value):
            return value

    else:
        # integer dtypes: accept ints, or floats that are exactly integral
        if lib.is_integer(value) or (lib.is_float(value) and value.is_integer()):
            return value
        # TODO: unsigned checks

    # Note: without the "str" here, the f-string rendering raises in
    # py38 builds.
    raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}")
def __setitem__(self, key, value) -> None:
    """
    Set positions `key` to `value`, updating data and mask consistently.

    NA-like scalars only flip the mask on; other scalars are validated
    and clear the mask; array-likes are coerced to a (data, mask) pair.
    """
    key = check_array_indexer(self, key)

    if is_scalar(value):
        if is_valid_na_for_dtype(value, self.dtype):
            # setting NA: only the mask changes, data is left as-is
            self._mask[key] = True
        else:
            value = self._validate_setitem_value(value)
            self._data[key] = value
            self._mask[key] = False
        return

    # array-like value: coerce to matching data/mask and assign both
    value, mask = self._coerce_to_array(value, dtype=self.dtype)

    self._data[key] = value
    self._mask[key] = mask
def __contains__(self, key) -> bool:
    """Membership test; distinguishes np.nan from pd.NA for float dtypes."""
    if isna(key) and key is not self.dtype.na_value:
        # GH#52840: an unmasked np.nan in float data counts as containing nan
        if self._data.dtype.kind == "f" and lib.is_float(key):
            return bool((np.isnan(self._data) & ~self._mask).any())

    return bool(super().__contains__(key))
def __iter__(self) -> Iterator:
    """
    Iterate over elements, yielding ``dtype.na_value`` at masked positions.

    For ndim > 1, yields sub-arrays via __getitem__.
    """
    if self.ndim == 1:
        if not self._hasna:
            # fast path: no mask checks needed
            for val in self._data:
                yield val
        else:
            na_value = self.dtype.na_value
            for isna_, val in zip(self._mask, self._data):
                if isna_:
                    yield na_value
                else:
                    yield val
    else:
        for i in range(len(self)):
            yield self[i]
def __len__(self) -> int:
    """Length of the array (first axis of the underlying data)."""
    return len(self._data)
@property
def shape(self) -> Shape:
    # Shape of the underlying data ndarray (mask has the same shape).
    return self._data.shape
@property
def ndim(self) -> int:
    # Number of dimensions of the underlying data ndarray.
    return self._data.ndim
def swapaxes(self, axis1, axis2) -> Self:
    """Return a new masked array with the two given axes interchanged."""
    swapped_values = self._data.swapaxes(axis1, axis2)
    swapped_mask = self._mask.swapaxes(axis1, axis2)
    return self._simple_new(swapped_values, swapped_mask)
def delete(self, loc, axis: AxisInt = 0) -> Self:
    """Return a new masked array with the sub-array at ``loc`` removed."""
    trimmed_data = np.delete(self._data, loc, axis=axis)
    trimmed_mask = np.delete(self._mask, loc, axis=axis)
    return self._simple_new(trimmed_data, trimmed_mask)
def reshape(self, *args, **kwargs) -> Self:
    """Reshape data and mask identically; arguments mirror ndarray.reshape."""
    reshaped_values = self._data.reshape(*args, **kwargs)
    reshaped_mask = self._mask.reshape(*args, **kwargs)
    return self._simple_new(reshaped_values, reshaped_mask)
def ravel(self, *args, **kwargs) -> Self:
    """Return a flattened copy of the array (data and mask flattened alike)."""
    # TODO: need to make sure we have the same order for data/mask
    flat_values = self._data.ravel(*args, **kwargs)
    flat_mask = self._mask.ravel(*args, **kwargs)
    # NB: uses the public constructor, unlike most internal paths
    return type(self)(flat_values, flat_mask)
@property
def T(self) -> Self:
    # Transposed view-equivalent: transpose both data and mask.
    return self._simple_new(self._data.T, self._mask.T)
def round(self, decimals: int = 0, *args, **kwargs):
    """
    Round each value in the array a to the given number of decimals.

    Parameters
    ----------
    decimals : int, default 0
        Number of decimal places to round to. If decimals is negative,
        it specifies the number of positions to the left of the decimal point.
    *args, **kwargs
        Additional arguments and keywords have no effect but might be
        accepted for compatibility with NumPy.

    Returns
    -------
    NumericArray
        Rounded values of the NumericArray.

    See Also
    --------
    numpy.around : Round values of an np.array.
    DataFrame.round : Round values of a DataFrame.
    Series.round : Round values of a Series.
    """
    if self.dtype.kind == "b":
        # booleans are already "rounded"; no-op
        return self
    nv.validate_round(args, kwargs)
    values = np.round(self._data, decimals=decimals, **kwargs)

    # Usually we'll get same type as self, but ndarray[bool] casts to float
    return self._maybe_mask_result(values, self._mask.copy())
def __invert__(self) -> Self:
    # Elementwise bitwise/logical NOT; mask is preserved (copied).
    return self._simple_new(~self._data, self._mask.copy())
def __neg__(self) -> Self:
    # Elementwise negation; mask is preserved (copied).
    return self._simple_new(-self._data, self._mask.copy())
def __pos__(self) -> Self:
    # Unary plus: return an independent copy.
    return self.copy()
def __abs__(self) -> Self:
    # Elementwise absolute value; mask is preserved (copied).
    return self._simple_new(abs(self._data), self._mask.copy())

# ------------------------------------------------------------------
def _values_for_json(self) -> np.ndarray:
    # JSON serialization path: object ndarray so NA scalars survive.
    return np.asarray(self, dtype=object)
def to_numpy(
    self,
    dtype: npt.DTypeLike | None = None,
    copy: bool = False,
    na_value: object = lib.no_default,
) -> np.ndarray:
    """
    Convert to a NumPy Array.

    By default converts to an object-dtype NumPy array. Specify the `dtype` and
    `na_value` keywords to customize the conversion.

    Parameters
    ----------
    dtype : dtype, default object
        The numpy dtype to convert to.
    copy : bool, default False
        Whether to ensure that the returned value is a not a view on
        the array. Note that ``copy=False`` does not *ensure* that
        ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
        a copy is made, even if not strictly necessary. This is typically
        only possible when no missing values are present and `dtype`
        is the equivalent numpy dtype.
    na_value : scalar, optional
        Scalar missing value indicator to use in numpy array. Defaults
        to the native missing value indicator of this array (pd.NA).

    Returns
    -------
    numpy.ndarray

    Examples
    --------
    An object-dtype is the default result

    >>> a = pd.array([True, False, pd.NA], dtype="boolean")
    >>> a.to_numpy()
    array([True, False, <NA>], dtype=object)

    When no missing values are present, an equivalent dtype can be used.

    >>> pd.array([True, False], dtype="boolean").to_numpy(dtype="bool")
    array([ True, False])
    >>> pd.array([1, 2], dtype="Int64").to_numpy("int64")
    array([1, 2])

    However, requesting such dtype will raise a ValueError if
    missing values are present and the default missing value :attr:`NA`
    is used.

    >>> a = pd.array([True, False, pd.NA], dtype="boolean")
    >>> a
    <BooleanArray>
    [True, False, <NA>]
    Length: 3, dtype: boolean

    >>> a.to_numpy(dtype="bool")
    Traceback (most recent call last):
    ...
    ValueError: cannot convert to bool numpy array in presence of missing values

    Specify a valid `na_value` instead

    >>> a.to_numpy(dtype="bool", na_value=False)
    array([ True, False, False])
    """
    hasna = self._hasna
    dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, hasna)
    if dtype is None:
        dtype = object

    if hasna:
        if (
            dtype != object
            and not is_string_dtype(dtype)
            and na_value is libmissing.NA
        ):
            # pd.NA cannot be represented in a non-object/non-string dtype
            raise ValueError(
                f"cannot convert to '{dtype}'-dtype NumPy array "
                "with missing values. Specify an appropriate 'na_value' "
                "for this dtype."
            )
        # don't pass copy to astype -> always need a copy since we are mutating
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            data = self._data.astype(dtype)
        data[self._mask] = na_value
    else:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            data = self._data.astype(dtype, copy=copy)
    return data
@doc(ExtensionArray.tolist)
def tolist(self):
    if self.ndim > 1:
        # recurse row-by-row for 2D arrays
        return [x.tolist() for x in self]
    # keep native dtype when there are no NAs; otherwise fall back to object
    dtype = None if self._hasna else self._data.dtype
    return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist()
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
    ...

@overload
def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
    ...

@overload
def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
    ...

def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
    """
    Cast to the given dtype.

    Masked -> masked casts take a fast path preserving the mask;
    other ExtensionDtypes go through ``_from_sequence``; numpy dtypes
    go through ``to_numpy`` with a dtype-appropriate ``na_value``.
    """
    dtype = pandas_dtype(dtype)

    if dtype == self.dtype:
        if copy:
            return self.copy()
        return self

    # if we are astyping to another nullable masked dtype, we can fastpath
    if isinstance(dtype, BaseMaskedDtype):
        # TODO deal with NaNs for FloatingArray case
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            # TODO: Is rounding what we want long term?
            data = self._data.astype(dtype.numpy_dtype, copy=copy)
        # mask is copied depending on whether the data was copied, and
        # not directly depending on the `copy` keyword
        mask = self._mask if data is self._data else self._mask.copy()
        cls = dtype.construct_array_type()
        return cls(data, mask, copy=False)

    if isinstance(dtype, ExtensionDtype):
        eacls = dtype.construct_array_type()
        return eacls._from_sequence(self, dtype=dtype, copy=copy)

    na_value: float | np.datetime64 | lib.NoDefault

    # coerce
    if dtype.kind == "f":
        # In astype, we consider dtype=float to also mean na_value=np.nan
        na_value = np.nan
    elif dtype.kind == "M":
        na_value = np.datetime64("NaT")
    else:
        na_value = lib.no_default

    # to_numpy will also raise, but we get somewhat nicer exception messages here
    if dtype.kind in "iu" and self._hasna:
        raise ValueError("cannot convert NA to integer")
    if dtype.kind == "b" and self._hasna:
        # careful: astype_nansafe converts np.nan to True
        raise ValueError("cannot convert float NaN to bool")

    data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy)
    return data
__array_priority__ = 1000  # higher than ndarray so ops dispatch to us

def __array__(
    self, dtype: NpDtype | None = None, copy: bool | None = None
) -> np.ndarray:
    """
    the array interface, return my values
    We return an object array here to preserve our scalar values
    """
    # NOTE(review): `copy` is accepted but not forwarded to to_numpy here.
    return self.to_numpy(dtype=dtype)
# Types (besides BaseMaskedArray) this array participates in ufuncs with;
# set by subclasses.
_HANDLED_TYPES: tuple[type, ...]

def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Numpy ufunc protocol: apply the ufunc to ._data and OR together
    the input masks to mask the result.
    """
    # For MaskedArray inputs, we apply the ufunc to ._data
    # and mask the result.

    out = kwargs.get("out", ())

    for x in inputs + out:
        if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)):
            return NotImplemented

    # for binary ops, use our custom dunder methods
    result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
        self, ufunc, method, *inputs, **kwargs
    )
    if result is not NotImplemented:
        return result

    if "out" in kwargs:
        # e.g. test_ufunc_with_out
        return arraylike.dispatch_ufunc_with_out(
            self, ufunc, method, *inputs, **kwargs
        )

    if method == "reduce":
        result = arraylike.dispatch_reduction_ufunc(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            return result

    # combined mask: any input masked -> result masked at that position
    mask = np.zeros(len(self), dtype=bool)
    inputs2 = []
    for x in inputs:
        if isinstance(x, BaseMaskedArray):
            mask |= x._mask
            inputs2.append(x._data)
        else:
            inputs2.append(x)

    def reconstruct(x: np.ndarray):
        # Wrap a raw ndarray result back into the appropriate masked array.
        # we don't worry about scalar `x` here, since we
        # raise for reduce up above.
        from pandas.core.arrays import (
            BooleanArray,
            FloatingArray,
            IntegerArray,
        )

        if x.dtype.kind == "b":
            m = mask.copy()
            return BooleanArray(x, m)
        elif x.dtype.kind in "iu":
            m = mask.copy()
            return IntegerArray(x, m)
        elif x.dtype.kind == "f":
            m = mask.copy()
            if x.dtype == np.float16:
                # reached in e.g. np.sqrt on BooleanArray
                # we don't support float16
                x = x.astype(np.float32)
            return FloatingArray(x, m)
        else:
            # unsupported dtype: fall back to a plain ndarray with nan
            x[mask] = np.nan
        return x

    result = getattr(ufunc, method)(*inputs2, **kwargs)
    if ufunc.nout > 1:
        # e.g. np.divmod
        return tuple(reconstruct(x) for x in result)
    elif method == "reduce":
        # e.g. np.add.reduce; test_ufunc_reduce_raises
        if self._mask.any():
            return self._na_value
        return result
    else:
        return reconstruct(result)
def __arrow_array__(self, type=None):
    """
    Convert myself into a pyarrow Array.
    """
    import pyarrow as pa

    # pyarrow understands the (values, mask) representation directly
    return pa.array(self._data, mask=self._mask, type=type)
@property
def _hasna(self) -> bool:
    # Whether any position is masked (i.e. NA).
    # Note: this is expensive right now! The hope is that we can
    # make this faster by having an optional mask, but not have to change
    # source code using it..

    # error: Incompatible return value type (got "bool_", expected "bool")
    return self._mask.any()  # type: ignore[return-value]
def _propagate_mask(
    self, mask: npt.NDArray[np.bool_] | None, other
) -> npt.NDArray[np.bool_]:
    """
    Combine our mask with ``other``'s missing positions for a binary op.

    Parameters
    ----------
    mask : ndarray[bool] or None
        Mask already extracted from `other` when it was a masked array;
        None when `other` is a scalar or plain array-like.
    other : scalar or array-like
        The other operand.
    """
    if mask is None:
        mask = self._mask.copy()  # TODO: need test for BooleanArray needing a copy
        if other is libmissing.NA:
            # GH#45421 don't alter inplace
            mask = mask | True
        elif is_list_like(other) and len(other) == len(mask):
            mask = mask | isna(other)
    else:
        mask = self._mask | mask
    # Incompatible return value type (got "Optional[ndarray[Any, dtype[bool_]]]",
    # expected "ndarray[Any, dtype[bool_]]")
    return mask  # type: ignore[return-value]
def _arith_method(self, other, op):
    """
    Arithmetic op between self and `other`, propagating masks and
    special-casing NA operands and pow/rpow identities.
    """
    op_name = op.__name__
    omask = None

    if (
        not hasattr(other, "dtype")
        and is_list_like(other)
        and len(other) == len(self)
    ):
        # Try inferring masked dtype instead of casting to object
        other = pd_array(other)
        other = extract_array(other, extract_numpy=True)

    if isinstance(other, BaseMaskedArray):
        other, omask = other._data, other._mask

    elif is_list_like(other):
        if not isinstance(other, ExtensionArray):
            other = np.asarray(other)
        if other.ndim > 1:
            raise NotImplementedError("can only perform ops with 1-d structures")

    # We wrap the non-masked arithmetic logic used for numpy dtypes
    # in Series/Index arithmetic ops.
    other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
    pd_op = ops.get_array_op(op)
    other = ensure_wrapped_if_datetimelike(other)

    if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
        # Avoid DeprecationWarning: In future, it will be an error
        # for 'np.bool_' scalars to be interpreted as an index
        # e.g. test_array_scalar_like_equivalence
        other = bool(other)

    mask = self._propagate_mask(omask, other)

    if other is libmissing.NA:
        # result is all-NA; only its dtype matters below
        result = np.ones_like(self._data)
        if self.dtype.kind == "b":
            if op_name in {
                "floordiv",
                "rfloordiv",
                "pow",
                "rpow",
                "truediv",
                "rtruediv",
            }:
                # GH#41165 Try to match non-masked Series behavior
                # This is still imperfect GH#46043
                raise NotImplementedError(
                    f"operator '{op_name}' not implemented for bool dtypes"
                )
            if op_name in {"mod", "rmod"}:
                dtype = "int8"
            else:
                dtype = "bool"
            result = result.astype(dtype)
        elif "truediv" in op_name and self.dtype.kind != "f":
            # The actual data here doesn't matter since the mask
            # will be all-True, but since this is division, we want
            # to end up with floating dtype.
            result = result.astype(np.float64)
    else:
        # Make sure we do this before the "pow" mask checks
        # to get an expected exception message on shape mismatch.
        if self.dtype.kind in "iu" and op_name in ["floordiv", "mod"]:
            # TODO(GH#30188) ATM we don't match the behavior of non-masked
            # types with respect to floordiv-by-zero
            pd_op = op

        with np.errstate(all="ignore"):
            result = pd_op(self._data, other)

    if op_name == "pow":
        # 1 ** x is 1.
        mask = np.where((self._data == 1) & ~self._mask, False, mask)
        # x ** 0 is 1.
        if omask is not None:
            mask = np.where((other == 0) & ~omask, False, mask)
        elif other is not libmissing.NA:
            mask = np.where(other == 0, False, mask)

    elif op_name == "rpow":
        # 1 ** x is 1.
        if omask is not None:
            mask = np.where((other == 1) & ~omask, False, mask)
        elif other is not libmissing.NA:
            mask = np.where(other == 1, False, mask)
        # x ** 0 is 1.
        mask = np.where((self._data == 0) & ~self._mask, False, mask)

    return self._maybe_mask_result(result, mask)

# logical ops (&, |, ^) share the same mask-propagating machinery
_logical_method = _arith_method
def _cmp_method(self, other, op) -> BooleanArray:
    """
    Comparison op between self and `other`; always returns a BooleanArray
    whose mask combines both operands' missing positions.
    """
    from pandas.core.arrays import BooleanArray

    mask = None

    if isinstance(other, BaseMaskedArray):
        other, mask = other._data, other._mask

    elif is_list_like(other):
        other = np.asarray(other)
        if other.ndim > 1:
            raise NotImplementedError("can only perform ops with 1-d structures")
        if len(self) != len(other):
            raise ValueError("Lengths must match to compare")

    if other is libmissing.NA:
        # numpy does not handle pd.NA well as "other" scalar (it returns
        # a scalar False instead of an array)
        # This may be fixed by NA.__array_ufunc__. Revisit this check
        # once that's implemented.
        result = np.zeros(self._data.shape, dtype="bool")
        mask = np.ones(self._data.shape, dtype="bool")
    else:
        with warnings.catch_warnings():
            # numpy may show a FutureWarning or DeprecationWarning:
            #     elementwise comparison failed; returning scalar instead,
            #     but in the future will perform elementwise comparison
            #     before returning NotImplemented. We fall back to the correct
            #     behavior today, so that should be fine to ignore.
            warnings.filterwarnings("ignore", "elementwise", FutureWarning)
            warnings.filterwarnings("ignore", "elementwise", DeprecationWarning)
            method = getattr(self._data, f"__{op.__name__}__")
            result = method(other)

            if result is NotImplemented:
                result = invalid_comparison(self._data, other, op)

    mask = self._propagate_mask(mask, other)
    return BooleanArray(result, mask, copy=False)
def _maybe_mask_result(
    self, result: np.ndarray | tuple[np.ndarray, np.ndarray], mask: np.ndarray
):
    """
    Wrap a raw op result in the appropriate masked array by dtype kind.

    Parameters
    ----------
    result : array-like or tuple[array-like]
        Tuple results come from divmod-style ops and are wrapped pairwise.
    mask : array-like bool
    """
    if isinstance(result, tuple):
        # i.e. divmod
        div, mod = result
        return (
            self._maybe_mask_result(div, mask),
            self._maybe_mask_result(mod, mask),
        )

    if result.dtype.kind == "f":
        from pandas.core.arrays import FloatingArray

        return FloatingArray(result, mask, copy=False)

    elif result.dtype.kind == "b":
        from pandas.core.arrays import BooleanArray

        return BooleanArray(result, mask, copy=False)

    elif lib.is_np_dtype(result.dtype, "m") and is_supported_dtype(result.dtype):
        # e.g. test_numeric_arr_mul_tdscalar_numexpr_path
        from pandas.core.arrays import TimedeltaArray

        # timedelta has no separate mask; write NaT at masked positions
        result[mask] = result.dtype.type("NaT")

        if not isinstance(result, TimedeltaArray):
            return TimedeltaArray._simple_new(result, dtype=result.dtype)

        return result

    elif result.dtype.kind in "iu":
        from pandas.core.arrays import IntegerArray

        return IntegerArray(result, mask, copy=False)

    else:
        # fall back to a plain ndarray with nan at masked positions
        result[mask] = np.nan
        return result
def isna(self) -> np.ndarray:
    """Boolean ndarray flagging missing entries (a defensive copy of the mask)."""
    return np.array(self._mask, copy=True)
@property
def _na_value(self):
    # The scalar used to represent missing values (pd.NA for masked dtypes).
    return self.dtype.na_value
@property
def nbytes(self) -> int:
    # Total memory footprint: data buffer plus mask buffer.
    return self._data.nbytes + self._mask.nbytes
@classmethod
def _concat_same_type(
    cls,
    to_concat: Sequence[Self],
    axis: AxisInt = 0,
) -> Self:
    """Concatenate multiple arrays of this type along `axis`."""
    data = np.concatenate([x._data for x in to_concat], axis=axis)
    mask = np.concatenate([x._mask for x in to_concat], axis=axis)
    return cls(data, mask)
def _hash_pandas_object(
    self, *, encoding: str, hash_key: str, categorize: bool
) -> npt.NDArray[np.uint64]:
    """Hash the values; masked positions get the hash of the NA scalar."""
    hashed_array = hash_array(
        self._data, encoding=encoding, hash_key=hash_key, categorize=categorize
    )
    hashed_array[self.isna()] = hash(self.dtype.na_value)
    return hashed_array
def take(
    self,
    indexer,
    *,
    allow_fill: bool = False,
    fill_value: Scalar | None = None,
    axis: AxisInt = 0,
) -> Self:
    """
    Take elements at `indexer` positions; with ``allow_fill=True``,
    -1 entries in the indexer are filled with `fill_value` (NA by default).
    """
    # we always fill with 1 internally
    # to avoid upcasting
    data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
    result = take(
        self._data,
        indexer,
        fill_value=data_fill_value,
        allow_fill=allow_fill,
        axis=axis,
    )

    # filled positions default to masked (NA)
    mask = take(
        self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis
    )

    # if we are filling
    # we only fill where the indexer is null
    # not existing missing values
    # TODO(jreback) what if we have a non-na float as a fill value?
    if allow_fill and notna(fill_value):
        fill_mask = np.asarray(indexer) == -1
        result[fill_mask] = fill_value
        # unmask the positions we just filled with a real value
        mask = mask ^ fill_mask

    return self._simple_new(result, mask)
# error: Return type "BooleanArray" of "isin" incompatible with return type
|
| 964 |
+
# "ndarray" in supertype "ExtensionArray"
|
| 965 |
+
def isin(self, values: ArrayLike) -> BooleanArray: # type: ignore[override]
|
| 966 |
+
from pandas.core.arrays import BooleanArray
|
| 967 |
+
|
| 968 |
+
# algorithms.isin will eventually convert values to an ndarray, so no extra
|
| 969 |
+
# cost to doing it here first
|
| 970 |
+
values_arr = np.asarray(values)
|
| 971 |
+
result = isin(self._data, values_arr)
|
| 972 |
+
|
| 973 |
+
if self._hasna:
|
| 974 |
+
values_have_NA = values_arr.dtype == object and any(
|
| 975 |
+
val is self.dtype.na_value for val in values_arr
|
| 976 |
+
)
|
| 977 |
+
|
| 978 |
+
# For now, NA does not propagate so set result according to presence of NA,
|
| 979 |
+
# see https://github.com/pandas-dev/pandas/pull/38379 for some discussion
|
| 980 |
+
result[self._mask] = values_have_NA
|
| 981 |
+
|
| 982 |
+
mask = np.zeros(self._data.shape, dtype=bool)
|
| 983 |
+
return BooleanArray(result, mask, copy=False)
|
| 984 |
+
|
| 985 |
+
def copy(self) -> Self:
    """Return a deep copy (both data and mask buffers are duplicated)."""
    copied_values = self._data.copy()
    copied_mask = self._mask.copy()
    return self._simple_new(copied_values, copied_mask)
@doc(ExtensionArray.duplicated)
def duplicated(
    self, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
    # mask-aware duplicated: NA positions are handled by the algo itself
    values = self._data
    mask = self._mask
    return algos.duplicated(values, keep=keep, mask=mask)
def unique(self) -> Self:
    """
    Compute the BaseMaskedArray of unique values.

    Returns
    -------
    uniques : BaseMaskedArray
    """
    uniques, mask = algos.unique_with_mask(self._data, self._mask)
    return self._simple_new(uniques, mask)
@doc(ExtensionArray.searchsorted)
def searchsorted(
    self,
    value: NumpyValueArrayLike | ExtensionArray,
    side: Literal["left", "right"] = "left",
    sorter: NumpySorter | None = None,
) -> npt.NDArray[np.intp] | np.intp:
    if self._hasna:
        # NA has no defined sort position, so searchsorted is undefined
        raise ValueError(
            "searchsorted requires array to be sorted, which is impossible "
            "with NAs present."
        )
    if isinstance(value, ExtensionArray):
        value = value.astype(object)
    # Base class searchsorted would cast to object, which is *much* slower.
    return self._data.searchsorted(value, side=side, sorter=sorter)
@doc(ExtensionArray.factorize)
def factorize(
    self,
    use_na_sentinel: bool = True,
) -> tuple[np.ndarray, ExtensionArray]:
    arr = self._data
    mask = self._mask

    # Use a sentinel for na; recode and add NA to uniques if necessary below
    codes, uniques = factorize_array(arr, use_na_sentinel=True, mask=mask)

    # check that factorize_array correctly preserves dtype.
    assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype)

    has_na = mask.any()
    if use_na_sentinel or not has_na:
        size = len(uniques)
    else:
        # Make room for an NA value
        size = len(uniques) + 1
    uniques_mask = np.zeros(size, dtype=bool)
    if not use_na_sentinel and has_na:
        # NA must be placed at the code of its first occurrence so uniques
        # stay in order of first appearance
        na_index = mask.argmax()
        # Insert na with the proper code
        if na_index == 0:
            na_code = np.intp(0)
        else:
            na_code = codes[:na_index].max() + 1
        # shift codes at/after the NA slot, then map sentinel -1 to na_code
        codes[codes >= na_code] += 1
        codes[codes == -1] = na_code
        # dummy value for uniques; not used since uniques_mask will be True
        uniques = np.insert(uniques, na_code, 0)
        uniques_mask[na_code] = True
    uniques_ea = self._simple_new(uniques, uniques_mask)

    return codes, uniques_ea
@doc(ExtensionArray._values_for_argsort)
|
| 1064 |
+
def _values_for_argsort(self) -> np.ndarray:
|
| 1065 |
+
return self._data
|
| 1066 |
+
|
| 1067 |
+
def value_counts(self, dropna: bool = True) -> Series:
    """
    Returns a Series containing counts of each unique value.

    Parameters
    ----------
    dropna : bool, default True
        Don't include counts of missing values.

    Returns
    -------
    counts : Series

    See Also
    --------
    Series.value_counts
    """
    from pandas import (
        Index,
        Series,
    )
    from pandas.arrays import IntegerArray

    # na_counter > 0 signals that the NA count occupies the last slot of
    # value_counts (only possible when dropna=False).
    keys, value_counts, na_counter = algos.value_counts_arraylike(
        self._data, dropna=dropna, mask=self._mask
    )
    mask_index = np.zeros((len(value_counts),), dtype=np.bool_)
    # The counts themselves are never NA, so their mask stays all-False;
    # only the index may contain an NA entry.
    mask = mask_index.copy()

    if na_counter > 0:
        mask_index[-1] = True

    arr = IntegerArray(value_counts, mask)
    index = Index(
        self.dtype.construct_array_type()(
            keys, mask_index  # type: ignore[arg-type]
        )
    )
    return Series(arr, index=index, name="count", copy=False)
|
| 1106 |
+
|
| 1107 |
+
def _mode(self, dropna: bool = True) -> Self:
    """
    Return the mode(s) of the array as a sorted masked array.

    With dropna=True the helper returns values only (mask is all-False);
    with dropna=False it also returns a result mask.
    """
    if dropna:
        result = mode(self._data, dropna=dropna, mask=self._mask)
        res_mask = np.zeros(result.shape, dtype=np.bool_)
    else:
        result, res_mask = mode(self._data, dropna=dropna, mask=self._mask)
    result = type(self)(result, res_mask)  # type: ignore[arg-type]
    # Modes are returned sorted.
    return result[result.argsort()]
|
| 1115 |
+
|
| 1116 |
+
@doc(ExtensionArray.equals)
def equals(self, other) -> bool:
    # Different concrete classes or dtypes can never be equal.
    if type(self) != type(other) or self.dtype != other.dtype:
        return False

    # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT
    # equal: the NA masks must line up position-for-position.
    if not np.array_equal(self._mask, other._mask):
        return False

    # Masks are identical at this point, so one validity selector works
    # for both sides; compare only the unmasked values.
    valid = ~self._mask
    return array_equivalent(
        self._data[valid], other._data[valid], strict_nan=True, dtype_equal=True
    )
|
| 1131 |
+
|
| 1132 |
+
def _quantile(
    self, qs: npt.NDArray[np.float64], interpolation: str
) -> BaseMaskedArray:
    """
    Dispatch to quantile_with_mask, needed because we do not have
    _from_factorized.

    Notes
    -----
    We assume that all impacted cases are 1D-only.
    """
    res = quantile_with_mask(
        self._data,
        mask=self._mask,
        # TODO(GH#40932): na_value_for_dtype(self.dtype.numpy_dtype)
        # instead of np.nan
        fill_value=np.nan,
        qs=qs,
        interpolation=interpolation,
    )

    if self._hasna:
        # Our result mask is all-False unless we are all-NA, in which
        # case it is all-True.
        if self.ndim == 2:
            # I think this should be out_mask=self.isna().all(axis=1)
            # but am holding off until we have tests
            raise NotImplementedError
        if self.isna().all():
            out_mask = np.ones(res.shape, dtype=bool)

            if is_integer_dtype(self.dtype):
                # We try to maintain int dtype if possible for not all-na case
                # as well; the zeros are placeholders hidden by the all-True mask.
                res = np.zeros(res.shape, dtype=self.dtype.numpy_dtype)
        else:
            out_mask = np.zeros(res.shape, dtype=bool)
    else:
        out_mask = np.zeros(res.shape, dtype=bool)
    return self._maybe_mask_result(res, mask=out_mask)
|
| 1172 |
+
|
| 1173 |
+
# ------------------------------------------------------------------
|
| 1174 |
+
# Reductions
|
| 1175 |
+
|
| 1176 |
+
def _reduce(
    self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
    """
    Perform the named reduction, dispatching to the dedicated method when
    one exists and to the generic nanops implementation otherwise.

    With keepdims=True the result is wrapped back into a length-1 masked
    array; otherwise an NA result is returned as pd.NA.
    """
    if name in {"any", "all", "min", "max", "sum", "prod", "mean", "var", "std"}:
        result = getattr(self, name)(skipna=skipna, **kwargs)
    else:
        # median, skew, kurt, sem
        data = self._data
        mask = self._mask
        op = getattr(nanops, f"nan{name}")
        axis = kwargs.pop("axis", None)
        result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs)

    if keepdims:
        if isna(result):
            return self._wrap_na_result(name=name, axis=0, mask_size=(1,))
        else:
            result = result.reshape(1)
            mask = np.zeros(1, dtype=bool)
            return self._maybe_mask_result(result, mask)

    if isna(result):
        return libmissing.NA
    else:
        return result
|
| 1201 |
+
|
| 1202 |
+
def _wrap_reduction_result(self, name: str, result, *, skipna, axis):
|
| 1203 |
+
if isinstance(result, np.ndarray):
|
| 1204 |
+
if skipna:
|
| 1205 |
+
# we only retain mask for all-NA rows/columns
|
| 1206 |
+
mask = self._mask.all(axis=axis)
|
| 1207 |
+
else:
|
| 1208 |
+
mask = self._mask.any(axis=axis)
|
| 1209 |
+
|
| 1210 |
+
return self._maybe_mask_result(result, mask)
|
| 1211 |
+
return result
|
| 1212 |
+
|
| 1213 |
+
def _wrap_na_result(self, *, name, axis, mask_size):
    """
    Build an all-NA masked result of the correct dtype for reduction `name`.

    The placeholder value 1 is never visible because the mask is all-True;
    only the dtype of the wrapper matters.
    """
    mask = np.ones(mask_size, dtype=bool)

    float_dtyp = "float32" if self.dtype == "Float32" else "float64"
    if name in ["mean", "median", "var", "std", "skew", "kurt"]:
        # These reductions always produce floats.
        np_dtype = float_dtyp
    elif name in ["min", "max"] or self.dtype.itemsize == 8:
        # min/max preserve dtype; 8-byte dtypes already match the platform
        # accumulator width.
        np_dtype = self.dtype.numpy_dtype.name
    else:
        # Narrow integer dtypes widen to the platform's native accumulator:
        # 32-bit on Windows/32-bit builds, 64-bit elsewhere.
        is_windows_or_32bit = is_platform_windows() or not IS64
        int_dtyp = "int32" if is_windows_or_32bit else "int64"
        uint_dtyp = "uint32" if is_windows_or_32bit else "uint64"
        np_dtype = {"b": int_dtyp, "i": int_dtyp, "u": uint_dtyp, "f": float_dtyp}[
            self.dtype.kind
        ]

    value = np.array([1], dtype=np_dtype)
    return self._maybe_mask_result(value, mask=mask)
|
| 1231 |
+
|
| 1232 |
+
def _wrap_min_count_reduction_result(
|
| 1233 |
+
self, name: str, result, *, skipna, min_count, axis
|
| 1234 |
+
):
|
| 1235 |
+
if min_count == 0 and isinstance(result, np.ndarray):
|
| 1236 |
+
return self._maybe_mask_result(result, np.zeros(result.shape, dtype=bool))
|
| 1237 |
+
return self._wrap_reduction_result(name, result, skipna=skipna, axis=axis)
|
| 1238 |
+
|
| 1239 |
+
def sum(
    self,
    *,
    skipna: bool = True,
    min_count: int = 0,
    axis: AxisInt | None = 0,
    **kwargs,
):
    """
    Return the sum of the array's values.

    Delegates to ``masked_reductions.sum`` and wraps the result via the
    ``min_count``-aware wrapper.
    """
    # Reject unsupported numpy-compat kwargs.
    nv.validate_sum((), kwargs)

    result = masked_reductions.sum(
        self._data,
        self._mask,
        skipna=skipna,
        min_count=min_count,
        axis=axis,
    )
    return self._wrap_min_count_reduction_result(
        "sum", result, skipna=skipna, min_count=min_count, axis=axis
    )
|
| 1259 |
+
|
| 1260 |
+
def prod(
    self,
    *,
    skipna: bool = True,
    min_count: int = 0,
    axis: AxisInt | None = 0,
    **kwargs,
):
    """
    Return the product of the array's values.

    Delegates to ``masked_reductions.prod`` and wraps the result via the
    ``min_count``-aware wrapper.
    """
    # Reject unsupported numpy-compat kwargs.
    nv.validate_prod((), kwargs)

    result = masked_reductions.prod(
        self._data,
        self._mask,
        skipna=skipna,
        min_count=min_count,
        axis=axis,
    )
    return self._wrap_min_count_reduction_result(
        "prod", result, skipna=skipna, min_count=min_count, axis=axis
    )
|
| 1280 |
+
|
| 1281 |
+
def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
    """Return the mean of the array's values via ``masked_reductions.mean``."""
    # Reject unsupported numpy-compat kwargs.
    nv.validate_mean((), kwargs)
    result = masked_reductions.mean(
        self._data,
        self._mask,
        skipna=skipna,
        axis=axis,
    )
    return self._wrap_reduction_result("mean", result, skipna=skipna, axis=axis)
|
| 1290 |
+
|
| 1291 |
+
def var(
    self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
):
    """Return the variance (delta degrees of freedom ``ddof``) of the values."""
    # Reject unsupported numpy-compat kwargs.
    nv.validate_stat_ddof_func((), kwargs, fname="var")
    result = masked_reductions.var(
        self._data,
        self._mask,
        skipna=skipna,
        axis=axis,
        ddof=ddof,
    )
    return self._wrap_reduction_result("var", result, skipna=skipna, axis=axis)
|
| 1303 |
+
|
| 1304 |
+
def std(
    self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
):
    """Return the standard deviation (delta degrees of freedom ``ddof``)."""
    # Reject unsupported numpy-compat kwargs.
    nv.validate_stat_ddof_func((), kwargs, fname="std")
    result = masked_reductions.std(
        self._data,
        self._mask,
        skipna=skipna,
        axis=axis,
        ddof=ddof,
    )
    return self._wrap_reduction_result("std", result, skipna=skipna, axis=axis)
|
| 1316 |
+
|
| 1317 |
+
def min(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
    """Return the minimum of the array's values via ``masked_reductions.min``."""
    # Reject unsupported numpy-compat kwargs.
    nv.validate_min((), kwargs)
    result = masked_reductions.min(
        self._data,
        self._mask,
        skipna=skipna,
        axis=axis,
    )
    return self._wrap_reduction_result("min", result, skipna=skipna, axis=axis)
|
| 1326 |
+
|
| 1327 |
+
def max(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
    """Return the maximum of the array's values via ``masked_reductions.max``."""
    # Reject unsupported numpy-compat kwargs.
    nv.validate_max((), kwargs)
    result = masked_reductions.max(
        self._data,
        self._mask,
        skipna=skipna,
        axis=axis,
    )
    return self._wrap_reduction_result("max", result, skipna=skipna, axis=axis)
|
| 1336 |
+
|
| 1337 |
+
def map(self, mapper, na_action=None):
    """Map values with `mapper`, operating on the materialized numpy values."""
    materialized = self.to_numpy()
    return map_array(materialized, mapper, na_action=na_action)
|
| 1339 |
+
|
| 1340 |
+
def any(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
    """
    Return whether any element is truthy.

    Returns False unless there is at least one element that is truthy.
    By default, NAs are skipped. If ``skipna=False`` is specified and
    missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
    is used as for logical operations.

    .. versionchanged:: 1.4.0

    Parameters
    ----------
    skipna : bool, default True
        Exclude NA values. If the entire array is NA and `skipna` is
        True, then the result will be False, as for an empty array.
        If `skipna` is False, the result will still be True if there is
        at least one element that is truthy, otherwise NA will be returned
        if there are NA's present.
    axis : int, optional, default 0
    **kwargs : any, default None
        Additional keywords have no effect but might be accepted for
        compatibility with NumPy.

    Returns
    -------
    bool or :attr:`pandas.NA`

    See Also
    --------
    numpy.any : Numpy version of this method.
    BaseMaskedArray.all : Return whether all elements are truthy.

    Examples
    --------
    The result indicates whether any element is truthy (and by default
    skips NAs):

    >>> pd.array([True, False, True]).any()
    True
    >>> pd.array([True, False, pd.NA]).any()
    True
    >>> pd.array([False, False, pd.NA]).any()
    False
    >>> pd.array([], dtype="boolean").any()
    False
    >>> pd.array([pd.NA], dtype="boolean").any()
    False
    >>> pd.array([pd.NA], dtype="Float64").any()
    False

    With ``skipna=False``, the result can be NA if this is logically
    required (whether ``pd.NA`` is True or False influences the result):

    >>> pd.array([True, False, pd.NA]).any(skipna=False)
    True
    >>> pd.array([1, 0, pd.NA]).any(skipna=False)
    True
    >>> pd.array([False, False, pd.NA]).any(skipna=False)
    <NA>
    >>> pd.array([0, 0, pd.NA]).any(skipna=False)
    <NA>
    """
    nv.validate_any((), kwargs)

    # Fill NA positions with a falsey value so they cannot make any() True.
    values = self._data.copy()
    # error: Argument 3 to "putmask" has incompatible type "object";
    # expected "Union[_SupportsArray[dtype[Any]],
    # _NestedSequence[_SupportsArray[dtype[Any]]],
    # bool, int, float, complex, str, bytes,
    # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
    np.putmask(values, self._mask, self._falsey_value)  # type: ignore[arg-type]
    # NOTE(review): `axis` is accepted but not forwarded to values.any()
    # here — confirm whether that is intentional.
    result = values.any()
    if skipna:
        return result
    else:
        # Kleene logic: a True result, an empty array, or no NAs give a
        # definite answer; otherwise the NAs make the result indeterminate.
        if result or len(self) == 0 or not self._mask.any():
            return result
        else:
            return self.dtype.na_value
|
| 1420 |
+
|
| 1421 |
+
def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
    """
    Return whether all elements are truthy.

    Returns True unless there is at least one element that is falsey.
    By default, NAs are skipped. If ``skipna=False`` is specified and
    missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
    is used as for logical operations.

    .. versionchanged:: 1.4.0

    Parameters
    ----------
    skipna : bool, default True
        Exclude NA values. If the entire array is NA and `skipna` is
        True, then the result will be True, as for an empty array.
        If `skipna` is False, the result will still be False if there is
        at least one element that is falsey, otherwise NA will be returned
        if there are NA's present.
    axis : int, optional, default 0
    **kwargs : any, default None
        Additional keywords have no effect but might be accepted for
        compatibility with NumPy.

    Returns
    -------
    bool or :attr:`pandas.NA`

    See Also
    --------
    numpy.all : Numpy version of this method.
    BooleanArray.any : Return whether any element is truthy.

    Examples
    --------
    The result indicates whether all elements are truthy (and by default
    skips NAs):

    >>> pd.array([True, True, pd.NA]).all()
    True
    >>> pd.array([1, 1, pd.NA]).all()
    True
    >>> pd.array([True, False, pd.NA]).all()
    False
    >>> pd.array([], dtype="boolean").all()
    True
    >>> pd.array([pd.NA], dtype="boolean").all()
    True
    >>> pd.array([pd.NA], dtype="Float64").all()
    True

    With ``skipna=False``, the result can be NA if this is logically
    required (whether ``pd.NA`` is True or False influences the result):

    >>> pd.array([True, True, pd.NA]).all(skipna=False)
    <NA>
    >>> pd.array([1, 1, pd.NA]).all(skipna=False)
    <NA>
    >>> pd.array([True, False, pd.NA]).all(skipna=False)
    False
    >>> pd.array([1, 0, pd.NA]).all(skipna=False)
    False
    """
    nv.validate_all((), kwargs)

    # Fill NA positions with a truthy value so they cannot make all() False.
    values = self._data.copy()
    # error: Argument 3 to "putmask" has incompatible type "object";
    # expected "Union[_SupportsArray[dtype[Any]],
    # _NestedSequence[_SupportsArray[dtype[Any]]],
    # bool, int, float, complex, str, bytes,
    # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
    np.putmask(values, self._mask, self._truthy_value)  # type: ignore[arg-type]
    result = values.all(axis=axis)

    if skipna:
        return result
    else:
        # Kleene logic: a False result, an empty array, or no NAs give a
        # definite answer; otherwise the NAs make the result indeterminate.
        if not result or len(self) == 0 or not self._mask.any():
            return result
        else:
            return self.dtype.na_value
|
| 1502 |
+
|
| 1503 |
+
def interpolate(
    self,
    *,
    method: InterpolateOptions,
    axis: int,
    index,
    limit,
    limit_direction,
    limit_area,
    copy: bool,
    **kwargs,
) -> FloatingArray:
    """
    See NDFrame.interpolate.__doc__.
    """
    # NB: we return type(self) even if copy=False
    if self.dtype.kind == "f":
        # Float data can be interpolated in place (or on a copy).
        if copy:
            data = self._data.copy()
            mask = self._mask.copy()
        else:
            data = self._data
            mask = self._mask
    elif self.dtype.kind in "iu":
        # Integer data must be cast to float64; that cast already copies,
        # so force the copy path for the return below.
        copy = True
        data = self._data.astype("f8")
        mask = self._mask.copy()
    else:
        raise NotImplementedError(
            f"interpolate is not implemented for dtype={self.dtype}"
        )

    # In-place interpolation mutates `data` (and `mask`).
    missing.interpolate_2d_inplace(
        data,
        method=method,
        axis=0,
        index=index,
        limit=limit,
        limit_direction=limit_direction,
        limit_area=limit_area,
        mask=mask,
        **kwargs,
    )
    if not copy:
        return self  # type: ignore[return-value]
    if self.dtype.kind == "f":
        return type(self)._simple_new(data, mask)  # type: ignore[return-value]
    else:
        # Integer input comes back as a FloatingArray (data was cast to f8).
        from pandas.core.arrays import FloatingArray

        return FloatingArray._simple_new(data, mask)
|
| 1554 |
+
|
| 1555 |
+
def _accumulate(
    self, name: str, *, skipna: bool = True, **kwargs
) -> BaseMaskedArray:
    # Look up the named masked accumulation (e.g. cumsum) and rewrap the
    # resulting (values, mask) pair without re-validating.
    accum_func = getattr(masked_accumulations, name)
    values, mask = accum_func(self._data, self._mask, skipna=skipna, **kwargs)
    return self._simple_new(values, mask)
|
| 1565 |
+
|
| 1566 |
+
# ------------------------------------------------------------------
|
| 1567 |
+
# GroupBy Methods
|
| 1568 |
+
|
| 1569 |
+
def _groupby_op(
    self,
    *,
    how: str,
    has_dropped_na: bool,
    min_count: int,
    ngroups: int,
    ids: npt.NDArray[np.intp],
    **kwargs,
):
    """
    Execute a cython groupby operation on the masked data and wrap the
    result, keeping the mask in sync with the op's result_mask.
    """
    from pandas.core.groupby.ops import WrappedCythonOp

    kind = WrappedCythonOp.get_kind_from_how(how)
    op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)

    # libgroupby functions are responsible for NOT altering mask
    mask = self._mask
    if op.kind != "aggregate":
        # Transforms keep one result per input row, so start from the
        # input mask.
        result_mask = mask.copy()
    else:
        # Aggregations produce one result per group.
        result_mask = np.zeros(ngroups, dtype=bool)

    if how == "rank" and kwargs.get("na_option") in ["top", "bottom"]:
        # Ranking with NAs forced to an end yields a real rank for every
        # row, so no result is masked.
        result_mask[:] = False

    res_values = op._cython_op_ndim_compat(
        self._data,
        min_count=min_count,
        ngroups=ngroups,
        comp_ids=ids,
        mask=mask,
        result_mask=result_mask,
        **kwargs,
    )

    if op.how == "ohlc":
        # ohlc emits `arity` columns per group; replicate the mask to match.
        arity = op._cython_arity.get(op.how, 1)
        result_mask = np.tile(result_mask, (arity, 1)).T

    if op.how in ["idxmin", "idxmax"]:
        # Result values are indexes to take, keep as ndarray
        return res_values
    else:
        # res_values should already have the correct dtype, we just need to
        # wrap in a MaskedArray
        return self._maybe_mask_result(res_values, result_mask)
|
| 1615 |
+
|
| 1616 |
+
|
| 1617 |
+
def transpose_homogeneous_masked_arrays(
    masked_arrays: Sequence[BaseMaskedArray],
) -> list[BaseMaskedArray]:
    """Transpose masked arrays in a list, but faster.

    Input should be a list of 1-dim masked arrays of equal length and all have the
    same dtype. The caller is responsible for ensuring validity of input data.
    """
    arrays = list(masked_arrays)
    dtype = arrays[0].dtype
    shape = (len(arrays), len(arrays[0]))

    # Stack row-wise into Fortran-ordered buffers so each output column
    # (one per result array) is contiguous.
    values_buf = np.empty(shape, order="F", dtype=dtype.numpy_dtype)
    np.concatenate(
        [arr._data.reshape(1, -1) for arr in arrays], axis=0, out=values_buf
    )

    masks_buf = np.empty_like(values_buf, dtype=bool)
    np.concatenate(
        [arr._mask.reshape(1, -1) for arr in arrays], axis=0, out=masks_buf
    )

    arr_type = dtype.construct_array_type()
    return [
        arr_type(values_buf[:, col], mask=masks_buf[:, col])
        for col in range(values_buf.shape[1])
    ]
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/numeric.py
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import numbers
|
| 4 |
+
from typing import (
|
| 5 |
+
TYPE_CHECKING,
|
| 6 |
+
Any,
|
| 7 |
+
Callable,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from pandas._libs import (
|
| 13 |
+
lib,
|
| 14 |
+
missing as libmissing,
|
| 15 |
+
)
|
| 16 |
+
from pandas.errors import AbstractMethodError
|
| 17 |
+
from pandas.util._decorators import cache_readonly
|
| 18 |
+
|
| 19 |
+
from pandas.core.dtypes.common import (
|
| 20 |
+
is_integer_dtype,
|
| 21 |
+
is_string_dtype,
|
| 22 |
+
pandas_dtype,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
from pandas.core.arrays.masked import (
|
| 26 |
+
BaseMaskedArray,
|
| 27 |
+
BaseMaskedDtype,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
if TYPE_CHECKING:
|
| 31 |
+
from collections.abc import Mapping
|
| 32 |
+
|
| 33 |
+
import pyarrow
|
| 34 |
+
|
| 35 |
+
from pandas._typing import (
|
| 36 |
+
Dtype,
|
| 37 |
+
DtypeObj,
|
| 38 |
+
Self,
|
| 39 |
+
npt,
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class NumericDtype(BaseMaskedDtype):
    """Base masked dtype shared by the integer and floating nullable dtypes."""

    # Numpy dtype used when none is specified (set by subclasses).
    _default_np_dtype: np.dtype
    # Predicate recognizing this family's numpy dtypes (set by subclasses).
    _checker: Callable[[Any], bool]  # is_foo_dtype

    def __repr__(self) -> str:
        return f"{self.name}Dtype()"

    @cache_readonly
    def is_signed_integer(self) -> bool:
        return self.kind == "i"

    @cache_readonly
    def is_unsigned_integer(self) -> bool:
        return self.kind == "u"

    @property
    def _is_numeric(self) -> bool:
        return True

    def __from_arrow__(
        self, array: pyarrow.Array | pyarrow.ChunkedArray
    ) -> BaseMaskedArray:
        """
        Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays.arrow._arrow_utils import (
            pyarrow_array_to_numpy_and_mask,
        )

        array_class = self.construct_array_type()

        pyarrow_type = pyarrow.from_numpy_dtype(self.type)
        if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null(
            array.type
        ):
            # test_from_arrow_type_error raise for string, but allow
            # through itemsize conversion GH#31896
            rt_dtype = pandas_dtype(array.type.to_pandas_dtype())
            if rt_dtype.kind not in "iuf":
                # Could allow "c" or potentially disallow float<->int conversion,
                # but at the moment we specifically test that uint<->int works
                raise TypeError(
                    f"Expected array of {self} type, got {array.type} instead"
                )

            array = array.cast(pyarrow_type)

        if isinstance(array, pyarrow.ChunkedArray):
            # TODO this "if" can be removed when requiring pyarrow >= 10.0, which fixed
            # combine_chunks for empty arrays https://github.com/apache/arrow/pull/13757
            if array.num_chunks == 0:
                array = pyarrow.array([], type=array.type)
            else:
                array = array.combine_chunks()

        # pyarrow's mask marks valid entries; ours marks NA, hence the ~.
        data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype)
        return array_class(data.copy(), ~mask, copy=False)

    @classmethod
    def _get_dtype_mapping(cls) -> Mapping[np.dtype, NumericDtype]:
        # Subclasses map numpy dtypes to their masked dtype singletons.
        raise AbstractMethodError(cls)

    @classmethod
    def _standardize_dtype(cls, dtype: NumericDtype | str | np.dtype) -> NumericDtype:
        """
        Convert a string representation or a numpy dtype to NumericDtype.
        """
        if isinstance(dtype, str) and (dtype.startswith(("Int", "UInt", "Float"))):
            # Avoid DeprecationWarning from NumPy about np.dtype("Int64")
            # https://github.com/numpy/numpy/pull/7476
            dtype = dtype.lower()

        if not isinstance(dtype, NumericDtype):
            mapping = cls._get_dtype_mapping()
            try:
                dtype = mapping[np.dtype(dtype)]
            except KeyError as err:
                raise ValueError(f"invalid dtype specified {dtype}") from err
        return dtype

    @classmethod
    def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
        """
        Safely cast the values to the given dtype.

        "safe" in this context means the casting is lossless.
        """
        raise AbstractMethodError(cls)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _coerce_to_data_and_mask(
    values, dtype, copy: bool, dtype_cls: type[NumericDtype], default_dtype: np.dtype
):
    """
    Coerce arbitrary input into (values ndarray, NA mask, numpy dtype,
    inferred_type) for constructing a masked numeric array.

    Raises TypeError for inconvertible dtypes or non-1D input.
    """
    checker = dtype_cls._checker

    mask = None
    inferred_type = None

    # If no dtype was given, adopt the input's dtype when it already belongs
    # to this family.
    if dtype is None and hasattr(values, "dtype"):
        if checker(values.dtype):
            dtype = values.dtype

    if dtype is not None:
        dtype = dtype_cls._standardize_dtype(dtype)

    cls = dtype_cls.construct_array_type()
    if isinstance(values, cls):
        # Fast path: unwrap an existing masked array of the same family.
        values, mask = values._data, values._mask
        if dtype is not None:
            values = values.astype(dtype.numpy_dtype, copy=False)

        if copy:
            values = values.copy()
            mask = mask.copy()
        return values, mask, dtype, inferred_type

    # Keep the original around to detect precision loss after casting.
    original = values
    if not copy:
        values = np.asarray(values)
    else:
        values = np.array(values, copy=copy)
    inferred_type = None
    if values.dtype == object or is_string_dtype(values.dtype):
        inferred_type = lib.infer_dtype(values, skipna=True)
        if inferred_type == "boolean" and dtype is None:
            name = dtype_cls.__name__.strip("_")
            raise TypeError(f"{values.dtype} cannot be converted to {name}")

    elif values.dtype.kind == "b" and checker(dtype):
        # Booleans are accepted only with an explicit target dtype.
        if not copy:
            values = np.asarray(values, dtype=default_dtype)
        else:
            values = np.array(values, dtype=default_dtype, copy=copy)

    elif values.dtype.kind not in "iuf":
        name = dtype_cls.__name__.strip("_")
        raise TypeError(f"{values.dtype} cannot be converted to {name}")

    if values.ndim != 1:
        raise TypeError("values must be a 1D list-like")

    if mask is None:
        if values.dtype.kind in "iu":
            # fastpath: integer arrays cannot hold NA
            mask = np.zeros(len(values), dtype=np.bool_)
        else:
            mask = libmissing.is_numeric_na(values)
    else:
        assert len(mask) == len(values)

    if mask.ndim != 1:
        raise TypeError("mask must be a 1D list-like")

    # infer dtype if needed
    if dtype is None:
        dtype = default_dtype
    else:
        dtype = dtype.numpy_dtype

    if is_integer_dtype(dtype) and values.dtype.kind == "f" and len(values) > 0:
        if mask.all():
            # All-NA: values are placeholders hidden by the mask.
            values = np.ones(values.shape, dtype=dtype)
        else:
            # Check the largest magnitude value for float->int precision loss.
            idx = np.nanargmax(values)
            if int(values[idx]) != original[idx]:
                # We have ints that lost precision during the cast.
                inferred_type = lib.infer_dtype(original, skipna=True)
                if (
                    inferred_type not in ["floating", "mixed-integer-float"]
                    and not mask.any()
                ):
                    values = np.asarray(original, dtype=dtype)
                else:
                    values = np.asarray(original, dtype="object")

    # we copy as need to coerce here
    if mask.any():
        values = values.copy()
        values[mask] = cls._internal_fill_value
    if inferred_type in ("string", "unicode"):
        # casts from str are always safe since they raise
        # a ValueError if the str cannot be parsed into a float
        values = values.astype(dtype, copy=copy)
    else:
        values = dtype_cls._safe_cast(values, dtype, copy=False)

    return values, mask, dtype, inferred_type
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
class NumericArray(BaseMaskedArray):
|
| 235 |
+
"""
|
| 236 |
+
Base class for IntegerArray and FloatingArray.
|
| 237 |
+
"""
|
| 238 |
+
|
| 239 |
+
_dtype_cls: type[NumericDtype]
|
| 240 |
+
|
| 241 |
+
def __init__(
|
| 242 |
+
self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
|
| 243 |
+
) -> None:
|
| 244 |
+
checker = self._dtype_cls._checker
|
| 245 |
+
if not (isinstance(values, np.ndarray) and checker(values.dtype)):
|
| 246 |
+
descr = (
|
| 247 |
+
"floating"
|
| 248 |
+
if self._dtype_cls.kind == "f" # type: ignore[comparison-overlap]
|
| 249 |
+
else "integer"
|
| 250 |
+
)
|
| 251 |
+
raise TypeError(
|
| 252 |
+
f"values should be {descr} numpy array. Use "
|
| 253 |
+
"the 'pd.array' function instead"
|
| 254 |
+
)
|
| 255 |
+
if values.dtype == np.float16:
|
| 256 |
+
# If we don't raise here, then accessing self.dtype would raise
|
| 257 |
+
raise TypeError("FloatingArray does not support np.float16 dtype.")
|
| 258 |
+
|
| 259 |
+
super().__init__(values, mask, copy=copy)
|
| 260 |
+
|
| 261 |
+
@cache_readonly
|
| 262 |
+
def dtype(self) -> NumericDtype:
|
| 263 |
+
mapping = self._dtype_cls._get_dtype_mapping()
|
| 264 |
+
return mapping[self._data.dtype]
|
| 265 |
+
|
| 266 |
+
@classmethod
|
| 267 |
+
def _coerce_to_array(
|
| 268 |
+
cls, value, *, dtype: DtypeObj, copy: bool = False
|
| 269 |
+
) -> tuple[np.ndarray, np.ndarray]:
|
| 270 |
+
dtype_cls = cls._dtype_cls
|
| 271 |
+
default_dtype = dtype_cls._default_np_dtype
|
| 272 |
+
values, mask, _, _ = _coerce_to_data_and_mask(
|
| 273 |
+
value, dtype, copy, dtype_cls, default_dtype
|
| 274 |
+
)
|
| 275 |
+
return values, mask
|
| 276 |
+
|
| 277 |
+
@classmethod
|
| 278 |
+
def _from_sequence_of_strings(
|
| 279 |
+
cls, strings, *, dtype: Dtype | None = None, copy: bool = False
|
| 280 |
+
) -> Self:
|
| 281 |
+
from pandas.core.tools.numeric import to_numeric
|
| 282 |
+
|
| 283 |
+
scalars = to_numeric(strings, errors="raise", dtype_backend="numpy_nullable")
|
| 284 |
+
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
|
| 285 |
+
|
| 286 |
+
_HANDLED_TYPES = (np.ndarray, numbers.Number)
|
videollama2/lib/python3.10/site-packages/pandas/core/arrays/period.py
ADDED
|
@@ -0,0 +1,1313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from datetime import timedelta
|
| 4 |
+
import operator
|
| 5 |
+
from typing import (
|
| 6 |
+
TYPE_CHECKING,
|
| 7 |
+
Any,
|
| 8 |
+
Callable,
|
| 9 |
+
Literal,
|
| 10 |
+
TypeVar,
|
| 11 |
+
cast,
|
| 12 |
+
overload,
|
| 13 |
+
)
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
from pandas._libs import (
|
| 19 |
+
algos as libalgos,
|
| 20 |
+
lib,
|
| 21 |
+
)
|
| 22 |
+
from pandas._libs.arrays import NDArrayBacked
|
| 23 |
+
from pandas._libs.tslibs import (
|
| 24 |
+
BaseOffset,
|
| 25 |
+
NaT,
|
| 26 |
+
NaTType,
|
| 27 |
+
Timedelta,
|
| 28 |
+
add_overflowsafe,
|
| 29 |
+
astype_overflowsafe,
|
| 30 |
+
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
|
| 31 |
+
get_unit_from_dtype,
|
| 32 |
+
iNaT,
|
| 33 |
+
parsing,
|
| 34 |
+
period as libperiod,
|
| 35 |
+
to_offset,
|
| 36 |
+
)
|
| 37 |
+
from pandas._libs.tslibs.dtypes import (
|
| 38 |
+
FreqGroup,
|
| 39 |
+
PeriodDtypeBase,
|
| 40 |
+
freq_to_period_freqstr,
|
| 41 |
+
)
|
| 42 |
+
from pandas._libs.tslibs.fields import isleapyear_arr
|
| 43 |
+
from pandas._libs.tslibs.offsets import (
|
| 44 |
+
Tick,
|
| 45 |
+
delta_to_tick,
|
| 46 |
+
)
|
| 47 |
+
from pandas._libs.tslibs.period import (
|
| 48 |
+
DIFFERENT_FREQ,
|
| 49 |
+
IncompatibleFrequency,
|
| 50 |
+
Period,
|
| 51 |
+
get_period_field_arr,
|
| 52 |
+
period_asfreq_arr,
|
| 53 |
+
)
|
| 54 |
+
from pandas.util._decorators import (
|
| 55 |
+
cache_readonly,
|
| 56 |
+
doc,
|
| 57 |
+
)
|
| 58 |
+
from pandas.util._exceptions import find_stack_level
|
| 59 |
+
|
| 60 |
+
from pandas.core.dtypes.common import (
|
| 61 |
+
ensure_object,
|
| 62 |
+
pandas_dtype,
|
| 63 |
+
)
|
| 64 |
+
from pandas.core.dtypes.dtypes import (
|
| 65 |
+
DatetimeTZDtype,
|
| 66 |
+
PeriodDtype,
|
| 67 |
+
)
|
| 68 |
+
from pandas.core.dtypes.generic import (
|
| 69 |
+
ABCIndex,
|
| 70 |
+
ABCPeriodIndex,
|
| 71 |
+
ABCSeries,
|
| 72 |
+
ABCTimedeltaArray,
|
| 73 |
+
)
|
| 74 |
+
from pandas.core.dtypes.missing import isna
|
| 75 |
+
|
| 76 |
+
from pandas.core.arrays import datetimelike as dtl
|
| 77 |
+
import pandas.core.common as com
|
| 78 |
+
|
| 79 |
+
if TYPE_CHECKING:
|
| 80 |
+
from collections.abc import Sequence
|
| 81 |
+
|
| 82 |
+
from pandas._typing import (
|
| 83 |
+
AnyArrayLike,
|
| 84 |
+
Dtype,
|
| 85 |
+
FillnaOptions,
|
| 86 |
+
NpDtype,
|
| 87 |
+
NumpySorter,
|
| 88 |
+
NumpyValueArrayLike,
|
| 89 |
+
Self,
|
| 90 |
+
npt,
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
from pandas.core.arrays import (
|
| 94 |
+
DatetimeArray,
|
| 95 |
+
TimedeltaArray,
|
| 96 |
+
)
|
| 97 |
+
from pandas.core.arrays.base import ExtensionArray
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
_shared_doc_kwargs = {
|
| 104 |
+
"klass": "PeriodArray",
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _field_accessor(name: str, docstring: str | None = None):
|
| 109 |
+
def f(self):
|
| 110 |
+
base = self.dtype._dtype_code
|
| 111 |
+
result = get_period_field_arr(name, self.asi8, base)
|
| 112 |
+
return result
|
| 113 |
+
|
| 114 |
+
f.__name__ = name
|
| 115 |
+
f.__doc__ = docstring
|
| 116 |
+
return property(f)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
|
| 120 |
+
# incompatible with definition in base class "ExtensionArray"
|
| 121 |
+
class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): # type: ignore[misc]
|
| 122 |
+
"""
|
| 123 |
+
Pandas ExtensionArray for storing Period data.
|
| 124 |
+
|
| 125 |
+
Users should use :func:`~pandas.array` to create new instances.
|
| 126 |
+
|
| 127 |
+
Parameters
|
| 128 |
+
----------
|
| 129 |
+
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
|
| 130 |
+
The data to store. These should be arrays that can be directly
|
| 131 |
+
converted to ordinals without inference or copy (PeriodArray,
|
| 132 |
+
ndarray[int64]), or a box around such an array (Series[period],
|
| 133 |
+
PeriodIndex).
|
| 134 |
+
dtype : PeriodDtype, optional
|
| 135 |
+
A PeriodDtype instance from which to extract a `freq`. If both
|
| 136 |
+
`freq` and `dtype` are specified, then the frequencies must match.
|
| 137 |
+
freq : str or DateOffset
|
| 138 |
+
The `freq` to use for the array. Mostly applicable when `values`
|
| 139 |
+
is an ndarray of integers, when `freq` is required. When `values`
|
| 140 |
+
is a PeriodArray (or box around), it's checked that ``values.freq``
|
| 141 |
+
matches `freq`.
|
| 142 |
+
copy : bool, default False
|
| 143 |
+
Whether to copy the ordinals before storing.
|
| 144 |
+
|
| 145 |
+
Attributes
|
| 146 |
+
----------
|
| 147 |
+
None
|
| 148 |
+
|
| 149 |
+
Methods
|
| 150 |
+
-------
|
| 151 |
+
None
|
| 152 |
+
|
| 153 |
+
See Also
|
| 154 |
+
--------
|
| 155 |
+
Period: Represents a period of time.
|
| 156 |
+
PeriodIndex : Immutable Index for period data.
|
| 157 |
+
period_range: Create a fixed-frequency PeriodArray.
|
| 158 |
+
array: Construct a pandas array.
|
| 159 |
+
|
| 160 |
+
Notes
|
| 161 |
+
-----
|
| 162 |
+
There are two components to a PeriodArray
|
| 163 |
+
|
| 164 |
+
- ordinals : integer ndarray
|
| 165 |
+
- freq : pd.tseries.offsets.Offset
|
| 166 |
+
|
| 167 |
+
The values are physically stored as a 1-D ndarray of integers. These are
|
| 168 |
+
called "ordinals" and represent some kind of offset from a base.
|
| 169 |
+
|
| 170 |
+
The `freq` indicates the span covered by each element of the array.
|
| 171 |
+
All elements in the PeriodArray have the same `freq`.
|
| 172 |
+
|
| 173 |
+
Examples
|
| 174 |
+
--------
|
| 175 |
+
>>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01',
|
| 176 |
+
... '2023-01-02'], freq='D'))
|
| 177 |
+
<PeriodArray>
|
| 178 |
+
['2023-01-01', '2023-01-02']
|
| 179 |
+
Length: 2, dtype: period[D]
|
| 180 |
+
"""
|
| 181 |
+
|
| 182 |
+
# array priority higher than numpy scalars
|
| 183 |
+
__array_priority__ = 1000
|
| 184 |
+
_typ = "periodarray" # ABCPeriodArray
|
| 185 |
+
_internal_fill_value = np.int64(iNaT)
|
| 186 |
+
_recognized_scalars = (Period,)
|
| 187 |
+
_is_recognized_dtype = lambda x: isinstance(
|
| 188 |
+
x, PeriodDtype
|
| 189 |
+
) # check_compatible_with checks freq match
|
| 190 |
+
_infer_matches = ("period",)
|
| 191 |
+
|
| 192 |
+
@property
|
| 193 |
+
def _scalar_type(self) -> type[Period]:
|
| 194 |
+
return Period
|
| 195 |
+
|
| 196 |
+
# Names others delegate to us
|
| 197 |
+
_other_ops: list[str] = []
|
| 198 |
+
_bool_ops: list[str] = ["is_leap_year"]
|
| 199 |
+
_object_ops: list[str] = ["start_time", "end_time", "freq"]
|
| 200 |
+
_field_ops: list[str] = [
|
| 201 |
+
"year",
|
| 202 |
+
"month",
|
| 203 |
+
"day",
|
| 204 |
+
"hour",
|
| 205 |
+
"minute",
|
| 206 |
+
"second",
|
| 207 |
+
"weekofyear",
|
| 208 |
+
"weekday",
|
| 209 |
+
"week",
|
| 210 |
+
"dayofweek",
|
| 211 |
+
"day_of_week",
|
| 212 |
+
"dayofyear",
|
| 213 |
+
"day_of_year",
|
| 214 |
+
"quarter",
|
| 215 |
+
"qyear",
|
| 216 |
+
"days_in_month",
|
| 217 |
+
"daysinmonth",
|
| 218 |
+
]
|
| 219 |
+
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
|
| 220 |
+
_datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"]
|
| 221 |
+
|
| 222 |
+
_dtype: PeriodDtype
|
| 223 |
+
|
| 224 |
+
# --------------------------------------------------------------------
|
| 225 |
+
# Constructors
|
| 226 |
+
|
| 227 |
+
def __init__(
|
| 228 |
+
self, values, dtype: Dtype | None = None, freq=None, copy: bool = False
|
| 229 |
+
) -> None:
|
| 230 |
+
if freq is not None:
|
| 231 |
+
# GH#52462
|
| 232 |
+
warnings.warn(
|
| 233 |
+
"The 'freq' keyword in the PeriodArray constructor is deprecated "
|
| 234 |
+
"and will be removed in a future version. Pass 'dtype' instead",
|
| 235 |
+
FutureWarning,
|
| 236 |
+
stacklevel=find_stack_level(),
|
| 237 |
+
)
|
| 238 |
+
freq = validate_dtype_freq(dtype, freq)
|
| 239 |
+
dtype = PeriodDtype(freq)
|
| 240 |
+
|
| 241 |
+
if dtype is not None:
|
| 242 |
+
dtype = pandas_dtype(dtype)
|
| 243 |
+
if not isinstance(dtype, PeriodDtype):
|
| 244 |
+
raise ValueError(f"Invalid dtype {dtype} for PeriodArray")
|
| 245 |
+
|
| 246 |
+
if isinstance(values, ABCSeries):
|
| 247 |
+
values = values._values
|
| 248 |
+
if not isinstance(values, type(self)):
|
| 249 |
+
raise TypeError("Incorrect dtype")
|
| 250 |
+
|
| 251 |
+
elif isinstance(values, ABCPeriodIndex):
|
| 252 |
+
values = values._values
|
| 253 |
+
|
| 254 |
+
if isinstance(values, type(self)):
|
| 255 |
+
if dtype is not None and dtype != values.dtype:
|
| 256 |
+
raise raise_on_incompatible(values, dtype.freq)
|
| 257 |
+
values, dtype = values._ndarray, values.dtype
|
| 258 |
+
|
| 259 |
+
if not copy:
|
| 260 |
+
values = np.asarray(values, dtype="int64")
|
| 261 |
+
else:
|
| 262 |
+
values = np.array(values, dtype="int64", copy=copy)
|
| 263 |
+
if dtype is None:
|
| 264 |
+
raise ValueError("dtype is not specified and cannot be inferred")
|
| 265 |
+
dtype = cast(PeriodDtype, dtype)
|
| 266 |
+
NDArrayBacked.__init__(self, values, dtype)
|
| 267 |
+
|
| 268 |
+
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
|
| 269 |
+
@classmethod
|
| 270 |
+
def _simple_new( # type: ignore[override]
|
| 271 |
+
cls,
|
| 272 |
+
values: npt.NDArray[np.int64],
|
| 273 |
+
dtype: PeriodDtype,
|
| 274 |
+
) -> Self:
|
| 275 |
+
# alias for PeriodArray.__init__
|
| 276 |
+
assertion_msg = "Should be numpy array of type i8"
|
| 277 |
+
assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
|
| 278 |
+
return cls(values, dtype=dtype)
|
| 279 |
+
|
| 280 |
+
@classmethod
|
| 281 |
+
def _from_sequence(
|
| 282 |
+
cls,
|
| 283 |
+
scalars,
|
| 284 |
+
*,
|
| 285 |
+
dtype: Dtype | None = None,
|
| 286 |
+
copy: bool = False,
|
| 287 |
+
) -> Self:
|
| 288 |
+
if dtype is not None:
|
| 289 |
+
dtype = pandas_dtype(dtype)
|
| 290 |
+
if dtype and isinstance(dtype, PeriodDtype):
|
| 291 |
+
freq = dtype.freq
|
| 292 |
+
else:
|
| 293 |
+
freq = None
|
| 294 |
+
|
| 295 |
+
if isinstance(scalars, cls):
|
| 296 |
+
validate_dtype_freq(scalars.dtype, freq)
|
| 297 |
+
if copy:
|
| 298 |
+
scalars = scalars.copy()
|
| 299 |
+
return scalars
|
| 300 |
+
|
| 301 |
+
periods = np.asarray(scalars, dtype=object)
|
| 302 |
+
|
| 303 |
+
freq = freq or libperiod.extract_freq(periods)
|
| 304 |
+
ordinals = libperiod.extract_ordinals(periods, freq)
|
| 305 |
+
dtype = PeriodDtype(freq)
|
| 306 |
+
return cls(ordinals, dtype=dtype)
|
| 307 |
+
|
| 308 |
+
@classmethod
|
| 309 |
+
def _from_sequence_of_strings(
|
| 310 |
+
cls, strings, *, dtype: Dtype | None = None, copy: bool = False
|
| 311 |
+
) -> Self:
|
| 312 |
+
return cls._from_sequence(strings, dtype=dtype, copy=copy)
|
| 313 |
+
|
| 314 |
+
@classmethod
|
| 315 |
+
def _from_datetime64(cls, data, freq, tz=None) -> Self:
|
| 316 |
+
"""
|
| 317 |
+
Construct a PeriodArray from a datetime64 array
|
| 318 |
+
|
| 319 |
+
Parameters
|
| 320 |
+
----------
|
| 321 |
+
data : ndarray[datetime64[ns], datetime64[ns, tz]]
|
| 322 |
+
freq : str or Tick
|
| 323 |
+
tz : tzinfo, optional
|
| 324 |
+
|
| 325 |
+
Returns
|
| 326 |
+
-------
|
| 327 |
+
PeriodArray[freq]
|
| 328 |
+
"""
|
| 329 |
+
if isinstance(freq, BaseOffset):
|
| 330 |
+
freq = freq_to_period_freqstr(freq.n, freq.name)
|
| 331 |
+
data, freq = dt64arr_to_periodarr(data, freq, tz)
|
| 332 |
+
dtype = PeriodDtype(freq)
|
| 333 |
+
return cls(data, dtype=dtype)
|
| 334 |
+
|
| 335 |
+
@classmethod
|
| 336 |
+
def _generate_range(cls, start, end, periods, freq):
|
| 337 |
+
periods = dtl.validate_periods(periods)
|
| 338 |
+
|
| 339 |
+
if freq is not None:
|
| 340 |
+
freq = Period._maybe_convert_freq(freq)
|
| 341 |
+
|
| 342 |
+
if start is not None or end is not None:
|
| 343 |
+
subarr, freq = _get_ordinal_range(start, end, periods, freq)
|
| 344 |
+
else:
|
| 345 |
+
raise ValueError("Not enough parameters to construct Period range")
|
| 346 |
+
|
| 347 |
+
return subarr, freq
|
| 348 |
+
|
| 349 |
+
@classmethod
|
| 350 |
+
def _from_fields(cls, *, fields: dict, freq) -> Self:
|
| 351 |
+
subarr, freq = _range_from_fields(freq=freq, **fields)
|
| 352 |
+
dtype = PeriodDtype(freq)
|
| 353 |
+
return cls._simple_new(subarr, dtype=dtype)
|
| 354 |
+
|
| 355 |
+
# -----------------------------------------------------------------
|
| 356 |
+
# DatetimeLike Interface
|
| 357 |
+
|
| 358 |
+
# error: Argument 1 of "_unbox_scalar" is incompatible with supertype
|
| 359 |
+
# "DatetimeLikeArrayMixin"; supertype defines the argument type as
|
| 360 |
+
# "Union[Union[Period, Any, Timedelta], NaTType]"
|
| 361 |
+
def _unbox_scalar( # type: ignore[override]
|
| 362 |
+
self,
|
| 363 |
+
value: Period | NaTType,
|
| 364 |
+
) -> np.int64:
|
| 365 |
+
if value is NaT:
|
| 366 |
+
# error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
|
| 367 |
+
return np.int64(value._value) # type: ignore[union-attr]
|
| 368 |
+
elif isinstance(value, self._scalar_type):
|
| 369 |
+
self._check_compatible_with(value)
|
| 370 |
+
return np.int64(value.ordinal)
|
| 371 |
+
else:
|
| 372 |
+
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
|
| 373 |
+
|
| 374 |
+
def _scalar_from_string(self, value: str) -> Period:
|
| 375 |
+
return Period(value, freq=self.freq)
|
| 376 |
+
|
| 377 |
+
# error: Argument 1 of "_check_compatible_with" is incompatible with
|
| 378 |
+
# supertype "DatetimeLikeArrayMixin"; supertype defines the argument type
|
| 379 |
+
# as "Period | Timestamp | Timedelta | NaTType"
|
| 380 |
+
def _check_compatible_with(self, other: Period | NaTType | PeriodArray) -> None: # type: ignore[override]
|
| 381 |
+
if other is NaT:
|
| 382 |
+
return
|
| 383 |
+
# error: Item "NaTType" of "Period | NaTType | PeriodArray" has no
|
| 384 |
+
# attribute "freq"
|
| 385 |
+
self._require_matching_freq(other.freq) # type: ignore[union-attr]
|
| 386 |
+
|
| 387 |
+
# --------------------------------------------------------------------
|
| 388 |
+
# Data / Attributes
|
| 389 |
+
|
| 390 |
+
@cache_readonly
|
| 391 |
+
def dtype(self) -> PeriodDtype:
|
| 392 |
+
return self._dtype
|
| 393 |
+
|
| 394 |
+
# error: Cannot override writeable attribute with read-only property
|
| 395 |
+
@property # type: ignore[override]
|
| 396 |
+
def freq(self) -> BaseOffset:
|
| 397 |
+
"""
|
| 398 |
+
Return the frequency object for this PeriodArray.
|
| 399 |
+
"""
|
| 400 |
+
return self.dtype.freq
|
| 401 |
+
|
| 402 |
+
@property
|
| 403 |
+
def freqstr(self) -> str:
|
| 404 |
+
return freq_to_period_freqstr(self.freq.n, self.freq.name)
|
| 405 |
+
|
| 406 |
+
def __array__(
|
| 407 |
+
self, dtype: NpDtype | None = None, copy: bool | None = None
|
| 408 |
+
) -> np.ndarray:
|
| 409 |
+
if dtype == "i8":
|
| 410 |
+
return self.asi8
|
| 411 |
+
elif dtype == bool:
|
| 412 |
+
return ~self._isnan
|
| 413 |
+
|
| 414 |
+
# This will raise TypeError for non-object dtypes
|
| 415 |
+
return np.array(list(self), dtype=object)
|
| 416 |
+
|
| 417 |
+
def __arrow_array__(self, type=None):
|
| 418 |
+
"""
|
| 419 |
+
Convert myself into a pyarrow Array.
|
| 420 |
+
"""
|
| 421 |
+
import pyarrow
|
| 422 |
+
|
| 423 |
+
from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
|
| 424 |
+
|
| 425 |
+
if type is not None:
|
| 426 |
+
if pyarrow.types.is_integer(type):
|
| 427 |
+
return pyarrow.array(self._ndarray, mask=self.isna(), type=type)
|
| 428 |
+
elif isinstance(type, ArrowPeriodType):
|
| 429 |
+
# ensure we have the same freq
|
| 430 |
+
if self.freqstr != type.freq:
|
| 431 |
+
raise TypeError(
|
| 432 |
+
"Not supported to convert PeriodArray to array with different "
|
| 433 |
+
f"'freq' ({self.freqstr} vs {type.freq})"
|
| 434 |
+
)
|
| 435 |
+
else:
|
| 436 |
+
raise TypeError(
|
| 437 |
+
f"Not supported to convert PeriodArray to '{type}' type"
|
| 438 |
+
)
|
| 439 |
+
|
| 440 |
+
period_type = ArrowPeriodType(self.freqstr)
|
| 441 |
+
storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64")
|
| 442 |
+
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
|
| 443 |
+
|
| 444 |
+
# --------------------------------------------------------------------
|
| 445 |
+
# Vectorized analogues of Period properties
|
| 446 |
+
|
| 447 |
+
year = _field_accessor(
|
| 448 |
+
"year",
|
| 449 |
+
"""
|
| 450 |
+
The year of the period.
|
| 451 |
+
|
| 452 |
+
Examples
|
| 453 |
+
--------
|
| 454 |
+
>>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
|
| 455 |
+
>>> idx.year
|
| 456 |
+
Index([2023, 2024, 2025], dtype='int64')
|
| 457 |
+
""",
|
| 458 |
+
)
|
| 459 |
+
month = _field_accessor(
|
| 460 |
+
"month",
|
| 461 |
+
"""
|
| 462 |
+
The month as January=1, December=12.
|
| 463 |
+
|
| 464 |
+
Examples
|
| 465 |
+
--------
|
| 466 |
+
>>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
|
| 467 |
+
>>> idx.month
|
| 468 |
+
Index([1, 2, 3], dtype='int64')
|
| 469 |
+
""",
|
| 470 |
+
)
|
| 471 |
+
day = _field_accessor(
|
| 472 |
+
"day",
|
| 473 |
+
"""
|
| 474 |
+
The days of the period.
|
| 475 |
+
|
| 476 |
+
Examples
|
| 477 |
+
--------
|
| 478 |
+
>>> idx = pd.PeriodIndex(['2020-01-31', '2020-02-28'], freq='D')
|
| 479 |
+
>>> idx.day
|
| 480 |
+
Index([31, 28], dtype='int64')
|
| 481 |
+
""",
|
| 482 |
+
)
|
| 483 |
+
hour = _field_accessor(
|
| 484 |
+
"hour",
|
| 485 |
+
"""
|
| 486 |
+
The hour of the period.
|
| 487 |
+
|
| 488 |
+
Examples
|
| 489 |
+
--------
|
| 490 |
+
>>> idx = pd.PeriodIndex(["2023-01-01 10:00", "2023-01-01 11:00"], freq='h')
|
| 491 |
+
>>> idx.hour
|
| 492 |
+
Index([10, 11], dtype='int64')
|
| 493 |
+
""",
|
| 494 |
+
)
|
| 495 |
+
minute = _field_accessor(
|
| 496 |
+
"minute",
|
| 497 |
+
"""
|
| 498 |
+
The minute of the period.
|
| 499 |
+
|
| 500 |
+
Examples
|
| 501 |
+
--------
|
| 502 |
+
>>> idx = pd.PeriodIndex(["2023-01-01 10:30:00",
|
| 503 |
+
... "2023-01-01 11:50:00"], freq='min')
|
| 504 |
+
>>> idx.minute
|
| 505 |
+
Index([30, 50], dtype='int64')
|
| 506 |
+
""",
|
| 507 |
+
)
|
| 508 |
+
second = _field_accessor(
|
| 509 |
+
"second",
|
| 510 |
+
"""
|
| 511 |
+
The second of the period.
|
| 512 |
+
|
| 513 |
+
Examples
|
| 514 |
+
--------
|
| 515 |
+
>>> idx = pd.PeriodIndex(["2023-01-01 10:00:30",
|
| 516 |
+
... "2023-01-01 10:00:31"], freq='s')
|
| 517 |
+
>>> idx.second
|
| 518 |
+
Index([30, 31], dtype='int64')
|
| 519 |
+
""",
|
| 520 |
+
)
|
| 521 |
+
weekofyear = _field_accessor(
|
| 522 |
+
"week",
|
| 523 |
+
"""
|
| 524 |
+
The week ordinal of the year.
|
| 525 |
+
|
| 526 |
+
Examples
|
| 527 |
+
--------
|
| 528 |
+
>>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
|
| 529 |
+
>>> idx.week # It can be written `weekofyear`
|
| 530 |
+
Index([5, 9, 13], dtype='int64')
|
| 531 |
+
""",
|
| 532 |
+
)
|
| 533 |
+
week = weekofyear
|
| 534 |
+
day_of_week = _field_accessor(
|
| 535 |
+
"day_of_week",
|
| 536 |
+
"""
|
| 537 |
+
The day of the week with Monday=0, Sunday=6.
|
| 538 |
+
|
| 539 |
+
Examples
|
| 540 |
+
--------
|
| 541 |
+
>>> idx = pd.PeriodIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D")
|
| 542 |
+
>>> idx.weekday
|
| 543 |
+
Index([6, 0, 1], dtype='int64')
|
| 544 |
+
""",
|
| 545 |
+
)
|
| 546 |
+
dayofweek = day_of_week
|
| 547 |
+
weekday = dayofweek
|
| 548 |
+
dayofyear = day_of_year = _field_accessor(
|
| 549 |
+
"day_of_year",
|
| 550 |
+
"""
|
| 551 |
+
The ordinal day of the year.
|
| 552 |
+
|
| 553 |
+
Examples
|
| 554 |
+
--------
|
| 555 |
+
>>> idx = pd.PeriodIndex(["2023-01-10", "2023-02-01", "2023-03-01"], freq="D")
|
| 556 |
+
>>> idx.dayofyear
|
| 557 |
+
Index([10, 32, 60], dtype='int64')
|
| 558 |
+
|
| 559 |
+
>>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
|
| 560 |
+
>>> idx
|
| 561 |
+
PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]')
|
| 562 |
+
>>> idx.dayofyear
|
| 563 |
+
Index([365, 366, 365], dtype='int64')
|
| 564 |
+
""",
|
| 565 |
+
)
|
| 566 |
+
quarter = _field_accessor(
|
| 567 |
+
"quarter",
|
| 568 |
+
"""
|
| 569 |
+
The quarter of the date.
|
| 570 |
+
|
| 571 |
+
Examples
|
| 572 |
+
--------
|
| 573 |
+
>>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
|
| 574 |
+
>>> idx.quarter
|
| 575 |
+
Index([1, 1, 1], dtype='int64')
|
| 576 |
+
""",
|
| 577 |
+
)
|
| 578 |
+
qyear = _field_accessor("qyear")
|
| 579 |
+
days_in_month = _field_accessor(
|
| 580 |
+
"days_in_month",
|
| 581 |
+
"""
|
| 582 |
+
The number of days in the month.
|
| 583 |
+
|
| 584 |
+
Examples
|
| 585 |
+
--------
|
| 586 |
+
For Series:
|
| 587 |
+
|
| 588 |
+
>>> period = pd.period_range('2020-1-1 00:00', '2020-3-1 00:00', freq='M')
|
| 589 |
+
>>> s = pd.Series(period)
|
| 590 |
+
>>> s
|
| 591 |
+
0 2020-01
|
| 592 |
+
1 2020-02
|
| 593 |
+
2 2020-03
|
| 594 |
+
dtype: period[M]
|
| 595 |
+
>>> s.dt.days_in_month
|
| 596 |
+
0 31
|
| 597 |
+
1 29
|
| 598 |
+
2 31
|
| 599 |
+
dtype: int64
|
| 600 |
+
|
| 601 |
+
For PeriodIndex:
|
| 602 |
+
|
| 603 |
+
>>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
|
| 604 |
+
>>> idx.days_in_month # It can be also entered as `daysinmonth`
|
| 605 |
+
Index([31, 28, 31], dtype='int64')
|
| 606 |
+
""",
|
| 607 |
+
)
|
| 608 |
+
daysinmonth = days_in_month
|
| 609 |
+
|
| 610 |
+
@property
|
| 611 |
+
def is_leap_year(self) -> npt.NDArray[np.bool_]:
|
| 612 |
+
"""
|
| 613 |
+
Logical indicating if the date belongs to a leap year.
|
| 614 |
+
|
| 615 |
+
Examples
|
| 616 |
+
--------
|
| 617 |
+
>>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
|
| 618 |
+
>>> idx.is_leap_year
|
| 619 |
+
array([False, True, False])
|
| 620 |
+
"""
|
| 621 |
+
return isleapyear_arr(np.asarray(self.year))
|
| 622 |
+
|
| 623 |
+
def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray:
|
| 624 |
+
"""
|
| 625 |
+
Cast to DatetimeArray/Index.
|
| 626 |
+
|
| 627 |
+
Parameters
|
| 628 |
+
----------
|
| 629 |
+
freq : str or DateOffset, optional
|
| 630 |
+
Target frequency. The default is 'D' for week or longer,
|
| 631 |
+
's' otherwise.
|
| 632 |
+
how : {'s', 'e', 'start', 'end'}
|
| 633 |
+
Whether to use the start or end of the time period being converted.
|
| 634 |
+
|
| 635 |
+
Returns
|
| 636 |
+
-------
|
| 637 |
+
DatetimeArray/Index
|
| 638 |
+
|
| 639 |
+
Examples
|
| 640 |
+
--------
|
| 641 |
+
>>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
|
| 642 |
+
>>> idx.to_timestamp()
|
| 643 |
+
DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'],
|
| 644 |
+
dtype='datetime64[ns]', freq='MS')
|
| 645 |
+
"""
|
| 646 |
+
from pandas.core.arrays import DatetimeArray
|
| 647 |
+
|
| 648 |
+
how = libperiod.validate_end_alias(how)
|
| 649 |
+
|
| 650 |
+
end = how == "E"
|
| 651 |
+
if end:
|
| 652 |
+
if freq == "B" or self.freq == "B":
|
| 653 |
+
# roll forward to ensure we land on B date
|
| 654 |
+
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
|
| 655 |
+
return self.to_timestamp(how="start") + adjust
|
| 656 |
+
else:
|
| 657 |
+
adjust = Timedelta(1, "ns")
|
| 658 |
+
return (self + self.freq).to_timestamp(how="start") - adjust
|
| 659 |
+
|
| 660 |
+
if freq is None:
|
| 661 |
+
freq_code = self._dtype._get_to_timestamp_base()
|
| 662 |
+
dtype = PeriodDtypeBase(freq_code, 1)
|
| 663 |
+
freq = dtype._freqstr
|
| 664 |
+
base = freq_code
|
| 665 |
+
else:
|
| 666 |
+
freq = Period._maybe_convert_freq(freq)
|
| 667 |
+
base = freq._period_dtype_code
|
| 668 |
+
|
| 669 |
+
new_parr = self.asfreq(freq, how=how)
|
| 670 |
+
|
| 671 |
+
new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base)
|
| 672 |
+
dta = DatetimeArray._from_sequence(new_data)
|
| 673 |
+
|
| 674 |
+
if self.freq.name == "B":
|
| 675 |
+
# See if we can retain BDay instead of Day in cases where
|
| 676 |
+
# len(self) is too small for infer_freq to distinguish between them
|
| 677 |
+
diffs = libalgos.unique_deltas(self.asi8)
|
| 678 |
+
if len(diffs) == 1:
|
| 679 |
+
diff = diffs[0]
|
| 680 |
+
if diff == self.dtype._n:
|
| 681 |
+
dta._freq = self.freq
|
| 682 |
+
elif diff == 1:
|
| 683 |
+
dta._freq = self.freq.base
|
| 684 |
+
# TODO: other cases?
|
| 685 |
+
return dta
|
| 686 |
+
else:
|
| 687 |
+
return dta._with_freq("infer")
|
| 688 |
+
|
| 689 |
+
# --------------------------------------------------------------------
|
| 690 |
+
|
| 691 |
+
def _box_func(self, x) -> Period | NaTType:
|
| 692 |
+
return Period._from_ordinal(ordinal=x, freq=self.freq)
|
| 693 |
+
|
| 694 |
+
@doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex")
|
| 695 |
+
def asfreq(self, freq=None, how: str = "E") -> Self:
|
| 696 |
+
"""
|
| 697 |
+
Convert the {klass} to the specified frequency `freq`.
|
| 698 |
+
|
| 699 |
+
Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
|
| 700 |
+
to each :class:`~pandas.Period` in this {klass}.
|
| 701 |
+
|
| 702 |
+
Parameters
|
| 703 |
+
----------
|
| 704 |
+
freq : str
|
| 705 |
+
A frequency.
|
| 706 |
+
how : str {{'E', 'S'}}, default 'E'
|
| 707 |
+
Whether the elements should be aligned to the end
|
| 708 |
+
or start within pa period.
|
| 709 |
+
|
| 710 |
+
* 'E', 'END', or 'FINISH' for end,
|
| 711 |
+
* 'S', 'START', or 'BEGIN' for start.
|
| 712 |
+
|
| 713 |
+
January 31st ('END') vs. January 1st ('START') for example.
|
| 714 |
+
|
| 715 |
+
Returns
|
| 716 |
+
-------
|
| 717 |
+
{klass}
|
| 718 |
+
The transformed {klass} with the new frequency.
|
| 719 |
+
|
| 720 |
+
See Also
|
| 721 |
+
--------
|
| 722 |
+
{other}.asfreq: Convert each Period in a {other_name} to the given frequency.
|
| 723 |
+
Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.
|
| 724 |
+
|
| 725 |
+
Examples
|
| 726 |
+
--------
|
| 727 |
+
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='Y')
|
| 728 |
+
>>> pidx
|
| 729 |
+
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
|
| 730 |
+
dtype='period[Y-DEC]')
|
| 731 |
+
|
| 732 |
+
>>> pidx.asfreq('M')
|
| 733 |
+
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
|
| 734 |
+
'2015-12'], dtype='period[M]')
|
| 735 |
+
|
| 736 |
+
>>> pidx.asfreq('M', how='S')
|
| 737 |
+
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
|
| 738 |
+
'2015-01'], dtype='period[M]')
|
| 739 |
+
"""
|
| 740 |
+
how = libperiod.validate_end_alias(how)
|
| 741 |
+
if isinstance(freq, BaseOffset) and hasattr(freq, "_period_dtype_code"):
|
| 742 |
+
freq = PeriodDtype(freq)._freqstr
|
| 743 |
+
freq = Period._maybe_convert_freq(freq)
|
| 744 |
+
|
| 745 |
+
base1 = self._dtype._dtype_code
|
| 746 |
+
base2 = freq._period_dtype_code
|
| 747 |
+
|
| 748 |
+
asi8 = self.asi8
|
| 749 |
+
# self.freq.n can't be negative or 0
|
| 750 |
+
end = how == "E"
|
| 751 |
+
if end:
|
| 752 |
+
ordinal = asi8 + self.dtype._n - 1
|
| 753 |
+
else:
|
| 754 |
+
ordinal = asi8
|
| 755 |
+
|
| 756 |
+
new_data = period_asfreq_arr(ordinal, base1, base2, end)
|
| 757 |
+
|
| 758 |
+
if self._hasna:
|
| 759 |
+
new_data[self._isnan] = iNaT
|
| 760 |
+
|
| 761 |
+
dtype = PeriodDtype(freq)
|
| 762 |
+
return type(self)(new_data, dtype=dtype)
|
| 763 |
+
|
| 764 |
+
# ------------------------------------------------------------------
|
| 765 |
+
# Rendering Methods
|
| 766 |
+
|
| 767 |
+
def _formatter(self, boxed: bool = False):
|
| 768 |
+
if boxed:
|
| 769 |
+
return str
|
| 770 |
+
return "'{}'".format
|
| 771 |
+
|
| 772 |
+
def _format_native_types(
|
| 773 |
+
self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
|
| 774 |
+
) -> npt.NDArray[np.object_]:
|
| 775 |
+
"""
|
| 776 |
+
actually format my specific types
|
| 777 |
+
"""
|
| 778 |
+
return libperiod.period_array_strftime(
|
| 779 |
+
self.asi8, self.dtype._dtype_code, na_rep, date_format
|
| 780 |
+
)
|
| 781 |
+
|
| 782 |
+
# ------------------------------------------------------------------
|
| 783 |
+
|
| 784 |
+
def astype(self, dtype, copy: bool = True):
|
| 785 |
+
# We handle Period[T] -> Period[U]
|
| 786 |
+
# Our parent handles everything else.
|
| 787 |
+
dtype = pandas_dtype(dtype)
|
| 788 |
+
if dtype == self._dtype:
|
| 789 |
+
if not copy:
|
| 790 |
+
return self
|
| 791 |
+
else:
|
| 792 |
+
return self.copy()
|
| 793 |
+
if isinstance(dtype, PeriodDtype):
|
| 794 |
+
return self.asfreq(dtype.freq)
|
| 795 |
+
|
| 796 |
+
if lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype):
|
| 797 |
+
# GH#45038 match PeriodIndex behavior.
|
| 798 |
+
tz = getattr(dtype, "tz", None)
|
| 799 |
+
unit = dtl.dtype_to_unit(dtype)
|
| 800 |
+
return self.to_timestamp().tz_localize(tz).as_unit(unit)
|
| 801 |
+
|
| 802 |
+
return super().astype(dtype, copy=copy)
|
| 803 |
+
|
| 804 |
+
def searchsorted(
|
| 805 |
+
self,
|
| 806 |
+
value: NumpyValueArrayLike | ExtensionArray,
|
| 807 |
+
side: Literal["left", "right"] = "left",
|
| 808 |
+
sorter: NumpySorter | None = None,
|
| 809 |
+
) -> npt.NDArray[np.intp] | np.intp:
|
| 810 |
+
npvalue = self._validate_setitem_value(value).view("M8[ns]")
|
| 811 |
+
|
| 812 |
+
# Cast to M8 to get datetime-like NaT placement,
|
| 813 |
+
# similar to dtl._period_dispatch
|
| 814 |
+
m8arr = self._ndarray.view("M8[ns]")
|
| 815 |
+
return m8arr.searchsorted(npvalue, side=side, sorter=sorter)
|
| 816 |
+
|
| 817 |
+
def _pad_or_backfill(
|
| 818 |
+
self,
|
| 819 |
+
*,
|
| 820 |
+
method: FillnaOptions,
|
| 821 |
+
limit: int | None = None,
|
| 822 |
+
limit_area: Literal["inside", "outside"] | None = None,
|
| 823 |
+
copy: bool = True,
|
| 824 |
+
) -> Self:
|
| 825 |
+
# view as dt64 so we get treated as timelike in core.missing,
|
| 826 |
+
# similar to dtl._period_dispatch
|
| 827 |
+
dta = self.view("M8[ns]")
|
| 828 |
+
result = dta._pad_or_backfill(
|
| 829 |
+
method=method, limit=limit, limit_area=limit_area, copy=copy
|
| 830 |
+
)
|
| 831 |
+
if copy:
|
| 832 |
+
return cast("Self", result.view(self.dtype))
|
| 833 |
+
else:
|
| 834 |
+
return self
|
| 835 |
+
|
| 836 |
+
def fillna(
|
| 837 |
+
self, value=None, method=None, limit: int | None = None, copy: bool = True
|
| 838 |
+
) -> Self:
|
| 839 |
+
if method is not None:
|
| 840 |
+
# view as dt64 so we get treated as timelike in core.missing,
|
| 841 |
+
# similar to dtl._period_dispatch
|
| 842 |
+
dta = self.view("M8[ns]")
|
| 843 |
+
result = dta.fillna(value=value, method=method, limit=limit, copy=copy)
|
| 844 |
+
# error: Incompatible return value type (got "Union[ExtensionArray,
|
| 845 |
+
# ndarray[Any, Any]]", expected "PeriodArray")
|
| 846 |
+
return result.view(self.dtype) # type: ignore[return-value]
|
| 847 |
+
return super().fillna(value=value, method=method, limit=limit, copy=copy)
|
| 848 |
+
|
| 849 |
+
# ------------------------------------------------------------------
|
| 850 |
+
# Arithmetic Methods
|
| 851 |
+
|
| 852 |
+
def _addsub_int_array_or_scalar(
|
| 853 |
+
self, other: np.ndarray | int, op: Callable[[Any, Any], Any]
|
| 854 |
+
) -> Self:
|
| 855 |
+
"""
|
| 856 |
+
Add or subtract array of integers.
|
| 857 |
+
|
| 858 |
+
Parameters
|
| 859 |
+
----------
|
| 860 |
+
other : np.ndarray[int64] or int
|
| 861 |
+
op : {operator.add, operator.sub}
|
| 862 |
+
|
| 863 |
+
Returns
|
| 864 |
+
-------
|
| 865 |
+
result : PeriodArray
|
| 866 |
+
"""
|
| 867 |
+
assert op in [operator.add, operator.sub]
|
| 868 |
+
if op is operator.sub:
|
| 869 |
+
other = -other
|
| 870 |
+
res_values = add_overflowsafe(self.asi8, np.asarray(other, dtype="i8"))
|
| 871 |
+
return type(self)(res_values, dtype=self.dtype)
|
| 872 |
+
|
| 873 |
+
def _add_offset(self, other: BaseOffset):
|
| 874 |
+
assert not isinstance(other, Tick)
|
| 875 |
+
|
| 876 |
+
self._require_matching_freq(other, base=True)
|
| 877 |
+
return self._addsub_int_array_or_scalar(other.n, operator.add)
|
| 878 |
+
|
| 879 |
+
# TODO: can we de-duplicate with Period._add_timedeltalike_scalar?
|
| 880 |
+
def _add_timedeltalike_scalar(self, other):
|
| 881 |
+
"""
|
| 882 |
+
Parameters
|
| 883 |
+
----------
|
| 884 |
+
other : timedelta, Tick, np.timedelta64
|
| 885 |
+
|
| 886 |
+
Returns
|
| 887 |
+
-------
|
| 888 |
+
PeriodArray
|
| 889 |
+
"""
|
| 890 |
+
if not isinstance(self.freq, Tick):
|
| 891 |
+
# We cannot add timedelta-like to non-tick PeriodArray
|
| 892 |
+
raise raise_on_incompatible(self, other)
|
| 893 |
+
|
| 894 |
+
if isna(other):
|
| 895 |
+
# i.e. np.timedelta64("NaT")
|
| 896 |
+
return super()._add_timedeltalike_scalar(other)
|
| 897 |
+
|
| 898 |
+
td = np.asarray(Timedelta(other).asm8)
|
| 899 |
+
return self._add_timedelta_arraylike(td)
|
| 900 |
+
|
| 901 |
+
def _add_timedelta_arraylike(
|
| 902 |
+
self, other: TimedeltaArray | npt.NDArray[np.timedelta64]
|
| 903 |
+
) -> Self:
|
| 904 |
+
"""
|
| 905 |
+
Parameters
|
| 906 |
+
----------
|
| 907 |
+
other : TimedeltaArray or ndarray[timedelta64]
|
| 908 |
+
|
| 909 |
+
Returns
|
| 910 |
+
-------
|
| 911 |
+
PeriodArray
|
| 912 |
+
"""
|
| 913 |
+
if not self.dtype._is_tick_like():
|
| 914 |
+
# We cannot add timedelta-like to non-tick PeriodArray
|
| 915 |
+
raise TypeError(
|
| 916 |
+
f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
|
| 917 |
+
)
|
| 918 |
+
|
| 919 |
+
dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")
|
| 920 |
+
|
| 921 |
+
# Similar to _check_timedeltalike_freq_compat, but we raise with a
|
| 922 |
+
# more specific exception message if necessary.
|
| 923 |
+
try:
|
| 924 |
+
delta = astype_overflowsafe(
|
| 925 |
+
np.asarray(other), dtype=dtype, copy=False, round_ok=False
|
| 926 |
+
)
|
| 927 |
+
except ValueError as err:
|
| 928 |
+
# e.g. if we have minutes freq and try to add 30s
|
| 929 |
+
# "Cannot losslessly convert units"
|
| 930 |
+
raise IncompatibleFrequency(
|
| 931 |
+
"Cannot add/subtract timedelta-like from PeriodArray that is "
|
| 932 |
+
"not an integer multiple of the PeriodArray's freq."
|
| 933 |
+
) from err
|
| 934 |
+
|
| 935 |
+
res_values = add_overflowsafe(self.asi8, np.asarray(delta.view("i8")))
|
| 936 |
+
return type(self)(res_values, dtype=self.dtype)
|
| 937 |
+
|
| 938 |
+
def _check_timedeltalike_freq_compat(self, other):
|
| 939 |
+
"""
|
| 940 |
+
Arithmetic operations with timedelta-like scalars or array `other`
|
| 941 |
+
are only valid if `other` is an integer multiple of `self.freq`.
|
| 942 |
+
If the operation is valid, find that integer multiple. Otherwise,
|
| 943 |
+
raise because the operation is invalid.
|
| 944 |
+
|
| 945 |
+
Parameters
|
| 946 |
+
----------
|
| 947 |
+
other : timedelta, np.timedelta64, Tick,
|
| 948 |
+
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
|
| 949 |
+
|
| 950 |
+
Returns
|
| 951 |
+
-------
|
| 952 |
+
multiple : int or ndarray[int64]
|
| 953 |
+
|
| 954 |
+
Raises
|
| 955 |
+
------
|
| 956 |
+
IncompatibleFrequency
|
| 957 |
+
"""
|
| 958 |
+
assert self.dtype._is_tick_like() # checked by calling function
|
| 959 |
+
|
| 960 |
+
dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")
|
| 961 |
+
|
| 962 |
+
if isinstance(other, (timedelta, np.timedelta64, Tick)):
|
| 963 |
+
td = np.asarray(Timedelta(other).asm8)
|
| 964 |
+
else:
|
| 965 |
+
td = np.asarray(other)
|
| 966 |
+
|
| 967 |
+
try:
|
| 968 |
+
delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False)
|
| 969 |
+
except ValueError as err:
|
| 970 |
+
raise raise_on_incompatible(self, other) from err
|
| 971 |
+
|
| 972 |
+
delta = delta.view("i8")
|
| 973 |
+
return lib.item_from_zerodim(delta)
|
| 974 |
+
|
| 975 |
+
|
| 976 |
+
def raise_on_incompatible(left, right) -> IncompatibleFrequency:
|
| 977 |
+
"""
|
| 978 |
+
Helper function to render a consistent error message when raising
|
| 979 |
+
IncompatibleFrequency.
|
| 980 |
+
|
| 981 |
+
Parameters
|
| 982 |
+
----------
|
| 983 |
+
left : PeriodArray
|
| 984 |
+
right : None, DateOffset, Period, ndarray, or timedelta-like
|
| 985 |
+
|
| 986 |
+
Returns
|
| 987 |
+
-------
|
| 988 |
+
IncompatibleFrequency
|
| 989 |
+
Exception to be raised by the caller.
|
| 990 |
+
"""
|
| 991 |
+
# GH#24283 error message format depends on whether right is scalar
|
| 992 |
+
if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
|
| 993 |
+
other_freq = None
|
| 994 |
+
elif isinstance(right, BaseOffset):
|
| 995 |
+
other_freq = freq_to_period_freqstr(right.n, right.name)
|
| 996 |
+
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period)):
|
| 997 |
+
other_freq = right.freqstr
|
| 998 |
+
else:
|
| 999 |
+
other_freq = delta_to_tick(Timedelta(right)).freqstr
|
| 1000 |
+
|
| 1001 |
+
own_freq = freq_to_period_freqstr(left.freq.n, left.freq.name)
|
| 1002 |
+
msg = DIFFERENT_FREQ.format(
|
| 1003 |
+
cls=type(left).__name__, own_freq=own_freq, other_freq=other_freq
|
| 1004 |
+
)
|
| 1005 |
+
return IncompatibleFrequency(msg)
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
# -------------------------------------------------------------------
|
| 1009 |
+
# Constructor Helpers
|
| 1010 |
+
|
| 1011 |
+
|
| 1012 |
+
def period_array(
|
| 1013 |
+
data: Sequence[Period | str | None] | AnyArrayLike,
|
| 1014 |
+
freq: str | Tick | BaseOffset | None = None,
|
| 1015 |
+
copy: bool = False,
|
| 1016 |
+
) -> PeriodArray:
|
| 1017 |
+
"""
|
| 1018 |
+
Construct a new PeriodArray from a sequence of Period scalars.
|
| 1019 |
+
|
| 1020 |
+
Parameters
|
| 1021 |
+
----------
|
| 1022 |
+
data : Sequence of Period objects
|
| 1023 |
+
A sequence of Period objects. These are required to all have
|
| 1024 |
+
the same ``freq.`` Missing values can be indicated by ``None``
|
| 1025 |
+
or ``pandas.NaT``.
|
| 1026 |
+
freq : str, Tick, or Offset
|
| 1027 |
+
The frequency of every element of the array. This can be specified
|
| 1028 |
+
to avoid inferring the `freq` from `data`.
|
| 1029 |
+
copy : bool, default False
|
| 1030 |
+
Whether to ensure a copy of the data is made.
|
| 1031 |
+
|
| 1032 |
+
Returns
|
| 1033 |
+
-------
|
| 1034 |
+
PeriodArray
|
| 1035 |
+
|
| 1036 |
+
See Also
|
| 1037 |
+
--------
|
| 1038 |
+
PeriodArray
|
| 1039 |
+
pandas.PeriodIndex
|
| 1040 |
+
|
| 1041 |
+
Examples
|
| 1042 |
+
--------
|
| 1043 |
+
>>> period_array([pd.Period('2017', freq='Y'),
|
| 1044 |
+
... pd.Period('2018', freq='Y')])
|
| 1045 |
+
<PeriodArray>
|
| 1046 |
+
['2017', '2018']
|
| 1047 |
+
Length: 2, dtype: period[Y-DEC]
|
| 1048 |
+
|
| 1049 |
+
>>> period_array([pd.Period('2017', freq='Y'),
|
| 1050 |
+
... pd.Period('2018', freq='Y'),
|
| 1051 |
+
... pd.NaT])
|
| 1052 |
+
<PeriodArray>
|
| 1053 |
+
['2017', '2018', 'NaT']
|
| 1054 |
+
Length: 3, dtype: period[Y-DEC]
|
| 1055 |
+
|
| 1056 |
+
Integers that look like years are handled
|
| 1057 |
+
|
| 1058 |
+
>>> period_array([2000, 2001, 2002], freq='D')
|
| 1059 |
+
<PeriodArray>
|
| 1060 |
+
['2000-01-01', '2001-01-01', '2002-01-01']
|
| 1061 |
+
Length: 3, dtype: period[D]
|
| 1062 |
+
|
| 1063 |
+
Datetime-like strings may also be passed
|
| 1064 |
+
|
| 1065 |
+
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
|
| 1066 |
+
<PeriodArray>
|
| 1067 |
+
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
|
| 1068 |
+
Length: 4, dtype: period[Q-DEC]
|
| 1069 |
+
"""
|
| 1070 |
+
data_dtype = getattr(data, "dtype", None)
|
| 1071 |
+
|
| 1072 |
+
if lib.is_np_dtype(data_dtype, "M"):
|
| 1073 |
+
return PeriodArray._from_datetime64(data, freq)
|
| 1074 |
+
if isinstance(data_dtype, PeriodDtype):
|
| 1075 |
+
out = PeriodArray(data)
|
| 1076 |
+
if freq is not None:
|
| 1077 |
+
if freq == data_dtype.freq:
|
| 1078 |
+
return out
|
| 1079 |
+
return out.asfreq(freq)
|
| 1080 |
+
return out
|
| 1081 |
+
|
| 1082 |
+
# other iterable of some kind
|
| 1083 |
+
if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
|
| 1084 |
+
data = list(data)
|
| 1085 |
+
|
| 1086 |
+
arrdata = np.asarray(data)
|
| 1087 |
+
|
| 1088 |
+
dtype: PeriodDtype | None
|
| 1089 |
+
if freq:
|
| 1090 |
+
dtype = PeriodDtype(freq)
|
| 1091 |
+
else:
|
| 1092 |
+
dtype = None
|
| 1093 |
+
|
| 1094 |
+
if arrdata.dtype.kind == "f" and len(arrdata) > 0:
|
| 1095 |
+
raise TypeError("PeriodIndex does not allow floating point in construction")
|
| 1096 |
+
|
| 1097 |
+
if arrdata.dtype.kind in "iu":
|
| 1098 |
+
arr = arrdata.astype(np.int64, copy=False)
|
| 1099 |
+
# error: Argument 2 to "from_ordinals" has incompatible type "Union[str,
|
| 1100 |
+
# Tick, None]"; expected "Union[timedelta, BaseOffset, str]"
|
| 1101 |
+
ordinals = libperiod.from_ordinals(arr, freq) # type: ignore[arg-type]
|
| 1102 |
+
return PeriodArray(ordinals, dtype=dtype)
|
| 1103 |
+
|
| 1104 |
+
data = ensure_object(arrdata)
|
| 1105 |
+
if freq is None:
|
| 1106 |
+
freq = libperiod.extract_freq(data)
|
| 1107 |
+
dtype = PeriodDtype(freq)
|
| 1108 |
+
return PeriodArray._from_sequence(data, dtype=dtype)
|
| 1109 |
+
|
| 1110 |
+
|
| 1111 |
+
@overload
|
| 1112 |
+
def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT:
|
| 1113 |
+
...
|
| 1114 |
+
|
| 1115 |
+
|
| 1116 |
+
@overload
|
| 1117 |
+
def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset:
|
| 1118 |
+
...
|
| 1119 |
+
|
| 1120 |
+
|
| 1121 |
+
def validate_dtype_freq(
|
| 1122 |
+
dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None
|
| 1123 |
+
) -> BaseOffsetT:
|
| 1124 |
+
"""
|
| 1125 |
+
If both a dtype and a freq are available, ensure they match. If only
|
| 1126 |
+
dtype is available, extract the implied freq.
|
| 1127 |
+
|
| 1128 |
+
Parameters
|
| 1129 |
+
----------
|
| 1130 |
+
dtype : dtype
|
| 1131 |
+
freq : DateOffset or None
|
| 1132 |
+
|
| 1133 |
+
Returns
|
| 1134 |
+
-------
|
| 1135 |
+
freq : DateOffset
|
| 1136 |
+
|
| 1137 |
+
Raises
|
| 1138 |
+
------
|
| 1139 |
+
ValueError : non-period dtype
|
| 1140 |
+
IncompatibleFrequency : mismatch between dtype and freq
|
| 1141 |
+
"""
|
| 1142 |
+
if freq is not None:
|
| 1143 |
+
freq = to_offset(freq, is_period=True)
|
| 1144 |
+
|
| 1145 |
+
if dtype is not None:
|
| 1146 |
+
dtype = pandas_dtype(dtype)
|
| 1147 |
+
if not isinstance(dtype, PeriodDtype):
|
| 1148 |
+
raise ValueError("dtype must be PeriodDtype")
|
| 1149 |
+
if freq is None:
|
| 1150 |
+
freq = dtype.freq
|
| 1151 |
+
elif freq != dtype.freq:
|
| 1152 |
+
raise IncompatibleFrequency("specified freq and dtype are different")
|
| 1153 |
+
# error: Incompatible return value type (got "Union[BaseOffset, Any, None]",
|
| 1154 |
+
# expected "BaseOffset")
|
| 1155 |
+
return freq # type: ignore[return-value]
|
| 1156 |
+
|
| 1157 |
+
|
| 1158 |
+
def dt64arr_to_periodarr(
|
| 1159 |
+
data, freq, tz=None
|
| 1160 |
+
) -> tuple[npt.NDArray[np.int64], BaseOffset]:
|
| 1161 |
+
"""
|
| 1162 |
+
Convert an datetime-like array to values Period ordinals.
|
| 1163 |
+
|
| 1164 |
+
Parameters
|
| 1165 |
+
----------
|
| 1166 |
+
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
|
| 1167 |
+
freq : Optional[Union[str, Tick]]
|
| 1168 |
+
Must match the `freq` on the `data` if `data` is a DatetimeIndex
|
| 1169 |
+
or Series.
|
| 1170 |
+
tz : Optional[tzinfo]
|
| 1171 |
+
|
| 1172 |
+
Returns
|
| 1173 |
+
-------
|
| 1174 |
+
ordinals : ndarray[int64]
|
| 1175 |
+
freq : Tick
|
| 1176 |
+
The frequency extracted from the Series or DatetimeIndex if that's
|
| 1177 |
+
used.
|
| 1178 |
+
|
| 1179 |
+
"""
|
| 1180 |
+
if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M":
|
| 1181 |
+
raise ValueError(f"Wrong dtype: {data.dtype}")
|
| 1182 |
+
|
| 1183 |
+
if freq is None:
|
| 1184 |
+
if isinstance(data, ABCIndex):
|
| 1185 |
+
data, freq = data._values, data.freq
|
| 1186 |
+
elif isinstance(data, ABCSeries):
|
| 1187 |
+
data, freq = data._values, data.dt.freq
|
| 1188 |
+
|
| 1189 |
+
elif isinstance(data, (ABCIndex, ABCSeries)):
|
| 1190 |
+
data = data._values
|
| 1191 |
+
|
| 1192 |
+
reso = get_unit_from_dtype(data.dtype)
|
| 1193 |
+
freq = Period._maybe_convert_freq(freq)
|
| 1194 |
+
base = freq._period_dtype_code
|
| 1195 |
+
return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq
|
| 1196 |
+
|
| 1197 |
+
|
| 1198 |
+
def _get_ordinal_range(start, end, periods, freq, mult: int = 1):
|
| 1199 |
+
if com.count_not_none(start, end, periods) != 2:
|
| 1200 |
+
raise ValueError(
|
| 1201 |
+
"Of the three parameters: start, end, and periods, "
|
| 1202 |
+
"exactly two must be specified"
|
| 1203 |
+
)
|
| 1204 |
+
|
| 1205 |
+
if freq is not None:
|
| 1206 |
+
freq = to_offset(freq, is_period=True)
|
| 1207 |
+
mult = freq.n
|
| 1208 |
+
|
| 1209 |
+
if start is not None:
|
| 1210 |
+
start = Period(start, freq)
|
| 1211 |
+
if end is not None:
|
| 1212 |
+
end = Period(end, freq)
|
| 1213 |
+
|
| 1214 |
+
is_start_per = isinstance(start, Period)
|
| 1215 |
+
is_end_per = isinstance(end, Period)
|
| 1216 |
+
|
| 1217 |
+
if is_start_per and is_end_per and start.freq != end.freq:
|
| 1218 |
+
raise ValueError("start and end must have same freq")
|
| 1219 |
+
if start is NaT or end is NaT:
|
| 1220 |
+
raise ValueError("start and end must not be NaT")
|
| 1221 |
+
|
| 1222 |
+
if freq is None:
|
| 1223 |
+
if is_start_per:
|
| 1224 |
+
freq = start.freq
|
| 1225 |
+
elif is_end_per:
|
| 1226 |
+
freq = end.freq
|
| 1227 |
+
else: # pragma: no cover
|
| 1228 |
+
raise ValueError("Could not infer freq from start/end")
|
| 1229 |
+
mult = freq.n
|
| 1230 |
+
|
| 1231 |
+
if periods is not None:
|
| 1232 |
+
periods = periods * mult
|
| 1233 |
+
if start is None:
|
| 1234 |
+
data = np.arange(
|
| 1235 |
+
end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
|
| 1236 |
+
)
|
| 1237 |
+
else:
|
| 1238 |
+
data = np.arange(
|
| 1239 |
+
start.ordinal, start.ordinal + periods, mult, dtype=np.int64
|
| 1240 |
+
)
|
| 1241 |
+
else:
|
| 1242 |
+
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
|
| 1243 |
+
|
| 1244 |
+
return data, freq
|
| 1245 |
+
|
| 1246 |
+
|
| 1247 |
+
def _range_from_fields(
|
| 1248 |
+
year=None,
|
| 1249 |
+
month=None,
|
| 1250 |
+
quarter=None,
|
| 1251 |
+
day=None,
|
| 1252 |
+
hour=None,
|
| 1253 |
+
minute=None,
|
| 1254 |
+
second=None,
|
| 1255 |
+
freq=None,
|
| 1256 |
+
) -> tuple[np.ndarray, BaseOffset]:
|
| 1257 |
+
if hour is None:
|
| 1258 |
+
hour = 0
|
| 1259 |
+
if minute is None:
|
| 1260 |
+
minute = 0
|
| 1261 |
+
if second is None:
|
| 1262 |
+
second = 0
|
| 1263 |
+
if day is None:
|
| 1264 |
+
day = 1
|
| 1265 |
+
|
| 1266 |
+
ordinals = []
|
| 1267 |
+
|
| 1268 |
+
if quarter is not None:
|
| 1269 |
+
if freq is None:
|
| 1270 |
+
freq = to_offset("Q", is_period=True)
|
| 1271 |
+
base = FreqGroup.FR_QTR.value
|
| 1272 |
+
else:
|
| 1273 |
+
freq = to_offset(freq, is_period=True)
|
| 1274 |
+
base = libperiod.freq_to_dtype_code(freq)
|
| 1275 |
+
if base != FreqGroup.FR_QTR.value:
|
| 1276 |
+
raise AssertionError("base must equal FR_QTR")
|
| 1277 |
+
|
| 1278 |
+
freqstr = freq.freqstr
|
| 1279 |
+
year, quarter = _make_field_arrays(year, quarter)
|
| 1280 |
+
for y, q in zip(year, quarter):
|
| 1281 |
+
calendar_year, calendar_month = parsing.quarter_to_myear(y, q, freqstr)
|
| 1282 |
+
val = libperiod.period_ordinal(
|
| 1283 |
+
calendar_year, calendar_month, 1, 1, 1, 1, 0, 0, base
|
| 1284 |
+
)
|
| 1285 |
+
ordinals.append(val)
|
| 1286 |
+
else:
|
| 1287 |
+
freq = to_offset(freq, is_period=True)
|
| 1288 |
+
base = libperiod.freq_to_dtype_code(freq)
|
| 1289 |
+
arrays = _make_field_arrays(year, month, day, hour, minute, second)
|
| 1290 |
+
for y, mth, d, h, mn, s in zip(*arrays):
|
| 1291 |
+
ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
|
| 1292 |
+
|
| 1293 |
+
return np.array(ordinals, dtype=np.int64), freq
|
| 1294 |
+
|
| 1295 |
+
|
| 1296 |
+
def _make_field_arrays(*fields) -> list[np.ndarray]:
|
| 1297 |
+
length = None
|
| 1298 |
+
for x in fields:
|
| 1299 |
+
if isinstance(x, (list, np.ndarray, ABCSeries)):
|
| 1300 |
+
if length is not None and len(x) != length:
|
| 1301 |
+
raise ValueError("Mismatched Period array lengths")
|
| 1302 |
+
if length is None:
|
| 1303 |
+
length = len(x)
|
| 1304 |
+
|
| 1305 |
+
# error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected
|
| 1306 |
+
# "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int,
|
| 1307 |
+
# integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]"
|
| 1308 |
+
return [
|
| 1309 |
+
np.asarray(x)
|
| 1310 |
+
if isinstance(x, (np.ndarray, list, ABCSeries))
|
| 1311 |
+
else np.repeat(x, length) # type: ignore[arg-type]
|
| 1312 |
+
for x in fields
|
| 1313 |
+
]
|
videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc
ADDED
|
Binary file (3.6 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc
ADDED
|
Binary file (4.68 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc
ADDED
|
Binary file (17.9 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc
ADDED
|
Binary file (12.2 kB). View file
|
|
|
videollama2/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (4.54 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""OpenGL Extensions"""
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/draw_buffers.cpython-310.pyc
ADDED
|
Binary file (1.66 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/envmap_bumpmap.cpython-310.pyc
ADDED
|
Binary file (2.23 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/fragment_shader.cpython-310.pyc
ADDED
|
Binary file (2.09 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/pn_triangles.cpython-310.pyc
ADDED
|
Binary file (1.71 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/texture_float.cpython-310.pyc
ADDED
|
Binary file (1.39 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/texture_mirror_once.cpython-310.pyc
ADDED
|
Binary file (1.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/__pycache__/vertex_attrib_array_object.cpython-310.pyc
ADDED
|
Binary file (1.57 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/draw_buffers.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.draw_buffers
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.draw_buffers to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension extends ARB_fragment_program to allow multiple output
|
| 10 |
+
colors, and provides a mechanism for directing those outputs to
|
| 11 |
+
multiple color buffers.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
The official definition of this extension is available here:
|
| 15 |
+
http://www.opengl.org/registry/specs/ATI/draw_buffers.txt
|
| 16 |
+
'''
|
| 17 |
+
from OpenGL import platform, constant, arrays
|
| 18 |
+
from OpenGL import extensions, wrapper
|
| 19 |
+
import ctypes
|
| 20 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 21 |
+
from OpenGL.raw.GL.ATI.draw_buffers import *
|
| 22 |
+
from OpenGL.raw.GL.ATI.draw_buffers import _EXTENSION_NAME
|
| 23 |
+
|
| 24 |
+
def glInitDrawBuffersATI():
|
| 25 |
+
'''Return boolean indicating whether this extension is available'''
|
| 26 |
+
from OpenGL import extensions
|
| 27 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 28 |
+
|
| 29 |
+
# INPUT glDrawBuffersATI.bufs size not checked against n
|
| 30 |
+
glDrawBuffersATI=wrapper.wrapper(glDrawBuffersATI).setInputArraySize(
|
| 31 |
+
'bufs', None
|
| 32 |
+
)
|
| 33 |
+
### END AUTOGENERATED SECTION
|
| 34 |
+
from OpenGL.lazywrapper import lazy as _lazy
|
| 35 |
+
@_lazy( glDrawBuffersATI )
|
| 36 |
+
def glDrawBuffersATI( baseOperation, n=None, bufs=None ):
|
| 37 |
+
"""glDrawBuffersATI( bufs ) -> bufs
|
| 38 |
+
|
| 39 |
+
Wrapper will calculate n from dims of bufs if only
|
| 40 |
+
one argument is provided...
|
| 41 |
+
"""
|
| 42 |
+
if bufs is None:
|
| 43 |
+
bufs = n
|
| 44 |
+
n = None
|
| 45 |
+
bufs = arrays.GLenumArray.asArray( bufs )
|
| 46 |
+
if n is None:
|
| 47 |
+
n = arrays.GLenumArray.arraySize( bufs )
|
| 48 |
+
return baseOperation( n,bufs )
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/envmap_bumpmap.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.envmap_bumpmap
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.envmap_bumpmap to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension adds environment mapped bump mapping (EMBM) to the GL.
|
| 10 |
+
The method exposed by this extension is to use a dependent texture
|
| 11 |
+
read on a bumpmap (du,dv) texture to offset the texture coordinates
|
| 12 |
+
read into a map on another texture unit. This (du,dv) offset is also
|
| 13 |
+
rotated through a user-specified rotation matrix to get the texture
|
| 14 |
+
coordinates into the appropriate space.
|
| 15 |
+
|
| 16 |
+
A new texture format is introduced in order for specifying the (du,dv)
|
| 17 |
+
bumpmap texture. This map represents -1 <= du,dv <= 1 offsets to
|
| 18 |
+
be applied to the texture coordinates used to read into the base
|
| 19 |
+
map. Additionally, the (du,dv) offsets are transformed by a rotation
|
| 20 |
+
matrix that this extension allows the user to specify. Further, a
|
| 21 |
+
new color operation is added to EXT_texture_env_combine to specify
|
| 22 |
+
both that bumpmapping is enabled and which texture unit to apply
|
| 23 |
+
the bump offset to.
|
| 24 |
+
|
| 25 |
+
The official definition of this extension is available here:
|
| 26 |
+
http://www.opengl.org/registry/specs/ATI/envmap_bumpmap.txt
|
| 27 |
+
'''
|
| 28 |
+
from OpenGL import platform, constant, arrays
|
| 29 |
+
from OpenGL import extensions, wrapper
|
| 30 |
+
import ctypes
|
| 31 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 32 |
+
from OpenGL.raw.GL.ATI.envmap_bumpmap import *
|
| 33 |
+
from OpenGL.raw.GL.ATI.envmap_bumpmap import _EXTENSION_NAME
|
| 34 |
+
|
| 35 |
+
def glInitEnvmapBumpmapATI():
|
| 36 |
+
'''Return boolean indicating whether this extension is available'''
|
| 37 |
+
from OpenGL import extensions
|
| 38 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 39 |
+
|
| 40 |
+
# INPUT glTexBumpParameterivATI.param size not checked against 'pname'
|
| 41 |
+
glTexBumpParameterivATI=wrapper.wrapper(glTexBumpParameterivATI).setInputArraySize(
|
| 42 |
+
'param', None
|
| 43 |
+
)
|
| 44 |
+
# INPUT glTexBumpParameterfvATI.param size not checked against 'pname'
|
| 45 |
+
glTexBumpParameterfvATI=wrapper.wrapper(glTexBumpParameterfvATI).setInputArraySize(
|
| 46 |
+
'param', None
|
| 47 |
+
)
|
| 48 |
+
glGetTexBumpParameterivATI=wrapper.wrapper(glGetTexBumpParameterivATI).setOutput(
|
| 49 |
+
'param',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
|
| 50 |
+
)
|
| 51 |
+
glGetTexBumpParameterfvATI=wrapper.wrapper(glGetTexBumpParameterfvATI).setOutput(
|
| 52 |
+
'param',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
|
| 53 |
+
)
|
| 54 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/fragment_shader.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.fragment_shader
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.fragment_shader to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension exposes a powerful fragment shading model which
|
| 10 |
+
provides a very general means of expressing fragment color blending
|
| 11 |
+
and dependent texture address modification. The programming is
|
| 12 |
+
a register-based model in which there is a fixed number of
|
| 13 |
+
instructions, texture lookups, read/write registers, and constants.
|
| 14 |
+
|
| 15 |
+
The fragment shader extension provides a unified instruction set
|
| 16 |
+
for operating on address or color data and eliminates the
|
| 17 |
+
distinction between the two. This extension provides all the
|
| 18 |
+
interfaces necessary to fully expose this programmable fragment
|
| 19 |
+
shader in GL.
|
| 20 |
+
|
| 21 |
+
Although conceived as a device-independent extension which would
|
| 22 |
+
expose the capabilities of future generations of hardware, changing
|
| 23 |
+
trends in programmable hardware have affected the lifespan of this
|
| 24 |
+
extension. For this reason you will now find a fixed set of
|
| 25 |
+
features and resources exposed, and the queries to determine this
|
| 26 |
+
set have been deprecated.
|
| 27 |
+
|
| 28 |
+
The official definition of this extension is available here:
|
| 29 |
+
http://www.opengl.org/registry/specs/ATI/fragment_shader.txt
|
| 30 |
+
'''
|
| 31 |
+
from OpenGL import platform, constant, arrays
|
| 32 |
+
from OpenGL import extensions, wrapper
|
| 33 |
+
import ctypes
|
| 34 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 35 |
+
from OpenGL.raw.GL.ATI.fragment_shader import *
|
| 36 |
+
from OpenGL.raw.GL.ATI.fragment_shader import _EXTENSION_NAME
|
| 37 |
+
|
| 38 |
+
def glInitFragmentShaderATI():
|
| 39 |
+
'''Return boolean indicating whether this extension is available'''
|
| 40 |
+
from OpenGL import extensions
|
| 41 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 42 |
+
|
| 43 |
+
glSetFragmentShaderConstantATI=wrapper.wrapper(glSetFragmentShaderConstantATI).setInputArraySize(
|
| 44 |
+
'value', 4
|
| 45 |
+
)
|
| 46 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/map_object_buffer.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.map_object_buffer
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.map_object_buffer to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension provides a mechanism for an application to obtain
|
| 10 |
+
the virtual address of an object buffer. This allows the
|
| 11 |
+
application to directly update the contents of an object buffer
|
| 12 |
+
and avoid any intermediate copies.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
The official definition of this extension is available here:
|
| 16 |
+
http://www.opengl.org/registry/specs/ATI/map_object_buffer.txt
|
| 17 |
+
'''
|
| 18 |
+
from OpenGL import platform, constant, arrays
|
| 19 |
+
from OpenGL import extensions, wrapper
|
| 20 |
+
import ctypes
|
| 21 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 22 |
+
from OpenGL.raw.GL.ATI.map_object_buffer import *
|
| 23 |
+
from OpenGL.raw.GL.ATI.map_object_buffer import _EXTENSION_NAME
|
| 24 |
+
|
| 25 |
+
def glInitMapObjectBufferATI():
|
| 26 |
+
'''Return boolean indicating whether this extension is available'''
|
| 27 |
+
from OpenGL import extensions
|
| 28 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/meminfo.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.meminfo
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.meminfo to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
Traditionally, OpenGL has treated resource management as a task of hardware
|
| 10 |
+
virtualization hidden from applications. While providing great portability,
|
| 11 |
+
this shielding of information can prevent applications from making
|
| 12 |
+
intelligent decisions on the management of resources they create. For
|
| 13 |
+
instance, an application may be better served by choosing a different
|
| 14 |
+
rendering method if there is not sufficient resources to efficiently
|
| 15 |
+
utilize its preferred method.
|
| 16 |
+
|
| 17 |
+
The official definition of this extension is available here:
|
| 18 |
+
http://www.opengl.org/registry/specs/ATI/meminfo.txt
|
| 19 |
+
'''
|
| 20 |
+
from OpenGL import platform, constant, arrays
|
| 21 |
+
from OpenGL import extensions, wrapper
|
| 22 |
+
import ctypes
|
| 23 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 24 |
+
from OpenGL.raw.GL.ATI.meminfo import *
|
| 25 |
+
from OpenGL.raw.GL.ATI.meminfo import _EXTENSION_NAME
|
| 26 |
+
|
| 27 |
+
def glInitMeminfoATI():
|
| 28 |
+
'''Return boolean indicating whether this extension is available'''
|
| 29 |
+
from OpenGL import extensions
|
| 30 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/pixel_format_float.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.pixel_format_float
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.pixel_format_float to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension adds pixel formats with floating-point RGBA color
|
| 10 |
+
components.
|
| 11 |
+
|
| 12 |
+
The size of each float components is specified using the same
|
| 13 |
+
WGL_RED_BITS_ARB, WGL_GREEN_BITS_ARB, WGL_BLUE_BITS_ARB and
|
| 14 |
+
WGL_ALPHA_BITS_ARB pixel format attributes that are used for
|
| 15 |
+
defining the size of fixed-point components. 32 bit floating-
|
| 16 |
+
point components are in the standard IEEE float format. 16 bit
|
| 17 |
+
floating-point components have 1 sign bit, 5 exponent bits,
|
| 18 |
+
and 10 mantissa bits.
|
| 19 |
+
|
| 20 |
+
In standard OpenGL RGBA color components are normally clamped to
|
| 21 |
+
the range [0,1]. The color components of a float buffer are
|
| 22 |
+
clamped to the limits of the range representable by their format.
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
The official definition of this extension is available here:
|
| 26 |
+
http://www.opengl.org/registry/specs/ATI/pixel_format_float.txt
|
| 27 |
+
'''
|
| 28 |
+
from OpenGL import platform, constant, arrays
|
| 29 |
+
from OpenGL import extensions, wrapper
|
| 30 |
+
import ctypes
|
| 31 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 32 |
+
from OpenGL.raw.GL.ATI.pixel_format_float import *
|
| 33 |
+
from OpenGL.raw.GL.ATI.pixel_format_float import _EXTENSION_NAME
|
| 34 |
+
|
| 35 |
+
def glInitPixelFormatFloatATI():
|
| 36 |
+
'''Return boolean indicating whether this extension is available'''
|
| 37 |
+
from OpenGL import extensions
|
| 38 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/pn_triangles.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.pn_triangles
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.pn_triangles to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
ATI_pn_triangles provides a path for enabling the GL to internally
|
| 10 |
+
tessellate input geometry into curved patches. The extension allows the
|
| 11 |
+
user to tune the amount of tessellation to be performed on each triangle as
|
| 12 |
+
a global state value. The intent of PN Triangle tessellation is
|
| 13 |
+
typically to produce geometry with a smoother silhouette and more organic
|
| 14 |
+
shape.
|
| 15 |
+
|
| 16 |
+
The tessellated patch will replace the triangles input into the GL.
|
| 17 |
+
The GL will generate new vertices in object-space, prior to geometry
|
| 18 |
+
transformation. Only the vertices and normals are required to produce
|
| 19 |
+
proper results, and the rest of the information per vertex is interpolated
|
| 20 |
+
linearly across the patch.
|
| 21 |
+
|
| 22 |
+
The official definition of this extension is available here:
|
| 23 |
+
http://www.opengl.org/registry/specs/ATI/pn_triangles.txt
|
| 24 |
+
'''
|
| 25 |
+
from OpenGL import platform, constant, arrays
|
| 26 |
+
from OpenGL import extensions, wrapper
|
| 27 |
+
import ctypes
|
| 28 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 29 |
+
from OpenGL.raw.GL.ATI.pn_triangles import *
|
| 30 |
+
from OpenGL.raw.GL.ATI.pn_triangles import _EXTENSION_NAME
|
| 31 |
+
|
| 32 |
+
def glInitPnTrianglesATI():
|
| 33 |
+
'''Return boolean indicating whether this extension is available'''
|
| 34 |
+
from OpenGL import extensions
|
| 35 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/separate_stencil.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.separate_stencil
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.separate_stencil to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension provides the ability to modify the stencil buffer
|
| 10 |
+
differently based on the facing direction of the primitive that
|
| 11 |
+
generated the fragment.
|
| 12 |
+
|
| 13 |
+
The official definition of this extension is available here:
|
| 14 |
+
http://www.opengl.org/registry/specs/ATI/separate_stencil.txt
|
| 15 |
+
'''
|
| 16 |
+
from OpenGL import platform, constant, arrays
|
| 17 |
+
from OpenGL import extensions, wrapper
|
| 18 |
+
import ctypes
|
| 19 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 20 |
+
from OpenGL.raw.GL.ATI.separate_stencil import *
|
| 21 |
+
from OpenGL.raw.GL.ATI.separate_stencil import _EXTENSION_NAME
|
| 22 |
+
|
| 23 |
+
def glInitSeparateStencilATI():
|
| 24 |
+
'''Return boolean indicating whether this extension is available'''
|
| 25 |
+
from OpenGL import extensions
|
| 26 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/text_fragment_shader.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.text_fragment_shader
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.text_fragment_shader to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
The ATI_fragment_shader extension exposes a powerful fragment
|
| 10 |
+
processing model that provides a very general means of expressing
|
| 11 |
+
fragment color blending and dependent texture address modification.
|
| 12 |
+
The processing is termed a fragment shader or fragment program and
|
| 13 |
+
is specifed using a register-based model in which there are fixed
|
| 14 |
+
numbers of instructions, texture lookups, read/write registers, and
|
| 15 |
+
constants.
|
| 16 |
+
|
| 17 |
+
ATI_fragment_shader provides a unified instruction set
|
| 18 |
+
for operating on address or color data and eliminates the
|
| 19 |
+
distinction between the two. That extension provides all the
|
| 20 |
+
interfaces necessary to fully expose this programmable fragment
|
| 21 |
+
processor in GL.
|
| 22 |
+
|
| 23 |
+
ATI_text_fragment_shader is a redefinition of the
|
| 24 |
+
ATI_fragment_shader functionality, using a slightly different
|
| 25 |
+
interface. The intent of creating ATI_text_fragment_shader is to
|
| 26 |
+
take a step towards treating fragment programs similar to other
|
| 27 |
+
programmable parts of the GL rendering pipeline, specifically
|
| 28 |
+
vertex programs. This new interface is intended to appear
|
| 29 |
+
similar to the ARB_vertex_program API, within the limits of the
|
| 30 |
+
feature set exposed by the original ATI_fragment_shader extension.
|
| 31 |
+
|
| 32 |
+
The most significant differences between the two extensions are:
|
| 33 |
+
|
| 34 |
+
(1) ATI_fragment_shader provides a procedural function call
|
| 35 |
+
interface to specify the fragment program, whereas
|
| 36 |
+
ATI_text_fragment_shader uses a textual string to specify
|
| 37 |
+
the program. The fundamental syntax and constructs of the
|
| 38 |
+
program "language" remain the same.
|
| 39 |
+
|
| 40 |
+
(2) The program object managment portions of the interface,
|
| 41 |
+
namely the routines used to create, bind, and delete program
|
| 42 |
+
objects and set program constants are managed
|
| 43 |
+
using the framework defined by ARB_vertex_program.
|
| 44 |
+
|
| 45 |
+
(3) ATI_fragment_shader refers to the description of the
|
| 46 |
+
programmable fragment processing as a "fragment shader".
|
| 47 |
+
In keeping with the desire to treat all programmable parts
|
| 48 |
+
of the pipeline consistently, ATI_text_fragment_shader refers
|
| 49 |
+
to these as "fragment programs". The name of the extension is
|
| 50 |
+
left as ATI_text_fragment_shader instead of
|
| 51 |
+
ATI_text_fragment_program in order to indicate the underlying
|
| 52 |
+
similarity between the API's of the two extensions, and to
|
| 53 |
+
differentiate it from any other potential extensions that
|
| 54 |
+
may be able to move even further in the direction of treating
|
| 55 |
+
fragment programs as just another programmable area of the
|
| 56 |
+
GL pipeline.
|
| 57 |
+
|
| 58 |
+
Although ATI_fragment_shader was originally conceived as a
|
| 59 |
+
device-independent extension that would expose the capabilities of
|
| 60 |
+
future generations of hardware, changing trends in programmable
|
| 61 |
+
hardware have affected the lifespan of this extension. For this
|
| 62 |
+
reason you will now find a fixed set of features and resources
|
| 63 |
+
exposed, and the queries to determine this set have been deprecated
|
| 64 |
+
in ATI_fragment_shader. Further, in ATI_text_fragment_shader,
|
| 65 |
+
most of these resource limits are fixed by the text grammar and
|
| 66 |
+
the queries have been removed altogether.
|
| 67 |
+
|
| 68 |
+
The official definition of this extension is available here:
|
| 69 |
+
http://www.opengl.org/registry/specs/ATI/text_fragment_shader.txt
|
| 70 |
+
'''
|
| 71 |
+
from OpenGL import platform, constant, arrays
|
| 72 |
+
from OpenGL import extensions, wrapper
|
| 73 |
+
import ctypes
|
| 74 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 75 |
+
from OpenGL.raw.GL.ATI.text_fragment_shader import *
|
| 76 |
+
from OpenGL.raw.GL.ATI.text_fragment_shader import _EXTENSION_NAME
|
| 77 |
+
|
| 78 |
+
def glInitTextFragmentShaderATI():
|
| 79 |
+
'''Return boolean indicating whether this extension is available'''
|
| 80 |
+
from OpenGL import extensions
|
| 81 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/texture_env_combine3.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.texture_env_combine3
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.texture_env_combine3 to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
Adds new set of operations to the texture combiner operations.
|
| 10 |
+
|
| 11 |
+
MODULATE_ADD_ATI Arg0 * Arg2 + Arg1
|
| 12 |
+
MODULATE_SIGNED_ADD_ATI Arg0 * Arg2 + Arg1 - 0.5
|
| 13 |
+
MODULATE_SUBTRACT_ATI Arg0 * Arg2 - Arg1
|
| 14 |
+
|
| 15 |
+
where Arg0, Arg1 and Arg2 are derived from
|
| 16 |
+
|
| 17 |
+
PRIMARY_COLOR_ARB primary color of incoming fragment
|
| 18 |
+
TEXTURE texture color of corresponding texture unit
|
| 19 |
+
CONSTANT_ARB texture environment constant color
|
| 20 |
+
PREVIOUS_ARB result of previous texture environment; on
|
| 21 |
+
texture unit 0, this maps to PRIMARY_COLOR_ARB
|
| 22 |
+
|
| 23 |
+
In addition, the result may be scaled by 1.0, 2.0 or 4.0.
|
| 24 |
+
|
| 25 |
+
Note that in addition to providing more flexible equations new source
|
| 26 |
+
inputs have been added for zero and one.
|
| 27 |
+
|
| 28 |
+
The official definition of this extension is available here:
|
| 29 |
+
http://www.opengl.org/registry/specs/ATI/texture_env_combine3.txt
|
| 30 |
+
'''
|
| 31 |
+
from OpenGL import platform, constant, arrays
|
| 32 |
+
from OpenGL import extensions, wrapper
|
| 33 |
+
import ctypes
|
| 34 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 35 |
+
from OpenGL.raw.GL.ATI.texture_env_combine3 import *
|
| 36 |
+
from OpenGL.raw.GL.ATI.texture_env_combine3 import _EXTENSION_NAME
|
| 37 |
+
|
| 38 |
+
def glInitTextureEnvCombine3ATI():
|
| 39 |
+
'''Return boolean indicating whether this extension is available'''
|
| 40 |
+
from OpenGL import extensions
|
| 41 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/texture_float.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.texture_float
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.texture_float to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension adds texture internal formats with 32 and 16 bit
|
| 10 |
+
floating-point components. The 32 bit floating-point components
|
| 11 |
+
are in the standard IEEE float format. The 16 bit floating-point
|
| 12 |
+
components have 1 sign bit, 5 exponent bits, and 10 mantissa bits.
|
| 13 |
+
Floating-point components are clamped to the limits of the range
|
| 14 |
+
representable by their format.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
The official definition of this extension is available here:
|
| 18 |
+
http://www.opengl.org/registry/specs/ATI/texture_float.txt
|
| 19 |
+
'''
|
| 20 |
+
from OpenGL import platform, constant, arrays
|
| 21 |
+
from OpenGL import extensions, wrapper
|
| 22 |
+
import ctypes
|
| 23 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 24 |
+
from OpenGL.raw.GL.ATI.texture_float import *
|
| 25 |
+
from OpenGL.raw.GL.ATI.texture_float import _EXTENSION_NAME
|
| 26 |
+
|
| 27 |
+
def glInitTextureFloatATI():
|
| 28 |
+
'''Return boolean indicating whether this extension is available'''
|
| 29 |
+
from OpenGL import extensions
|
| 30 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/texture_mirror_once.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.texture_mirror_once
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.texture_mirror_once to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
ATI_texture_mirror_once extends the set of texture wrap modes to
|
| 10 |
+
include two modes (GL_MIRROR_CLAMP_ATI, GL_MIRROR_CLAMP_TO_EDGE_ATI)
|
| 11 |
+
that effectively use a texture map twice as large as the original image
|
| 12 |
+
in which the additional half of the new image is a mirror image of the
|
| 13 |
+
original image.
|
| 14 |
+
|
| 15 |
+
This new mode relaxes the need to generate images whose opposite edges
|
| 16 |
+
match by using the original image to generate a matching "mirror image".
|
| 17 |
+
This mode allows the texture to be mirrored only once in the negative
|
| 18 |
+
s, t, and r directions.
|
| 19 |
+
|
| 20 |
+
The official definition of this extension is available here:
|
| 21 |
+
http://www.opengl.org/registry/specs/ATI/texture_mirror_once.txt
|
| 22 |
+
'''
|
| 23 |
+
from OpenGL import platform, constant, arrays
|
| 24 |
+
from OpenGL import extensions, wrapper
|
| 25 |
+
import ctypes
|
| 26 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 27 |
+
from OpenGL.raw.GL.ATI.texture_mirror_once import *
|
| 28 |
+
from OpenGL.raw.GL.ATI.texture_mirror_once import _EXTENSION_NAME
|
| 29 |
+
|
| 30 |
+
def glInitTextureMirrorOnceATI():
|
| 31 |
+
'''Return boolean indicating whether this extension is available'''
|
| 32 |
+
from OpenGL import extensions
|
| 33 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/vertex_array_object.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.vertex_array_object
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.vertex_array_object to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension defines an interface that allows multiple sets of
|
| 10 |
+
vertex array data to be cached in persistent server-side memory.
|
| 11 |
+
It is intended to allow client data to be stored in memory that
|
| 12 |
+
can be directly accessed by graphics hardware.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
The official definition of this extension is available here:
|
| 16 |
+
http://www.opengl.org/registry/specs/ATI/vertex_array_object.txt
|
| 17 |
+
'''
|
| 18 |
+
from OpenGL import platform, constant, arrays
|
| 19 |
+
from OpenGL import extensions, wrapper
|
| 20 |
+
import ctypes
|
| 21 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 22 |
+
from OpenGL.raw.GL.ATI.vertex_array_object import *
|
| 23 |
+
from OpenGL.raw.GL.ATI.vertex_array_object import _EXTENSION_NAME
|
| 24 |
+
|
| 25 |
+
def glInitVertexArrayObjectATI():
|
| 26 |
+
'''Return boolean indicating whether this extension is available'''
|
| 27 |
+
from OpenGL import extensions
|
| 28 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 29 |
+
|
| 30 |
+
# INPUT glNewObjectBufferATI.pointer size not checked against size
|
| 31 |
+
glNewObjectBufferATI=wrapper.wrapper(glNewObjectBufferATI).setInputArraySize(
|
| 32 |
+
'pointer', None
|
| 33 |
+
)
|
| 34 |
+
# INPUT glUpdateObjectBufferATI.pointer size not checked against size
|
| 35 |
+
glUpdateObjectBufferATI=wrapper.wrapper(glUpdateObjectBufferATI).setInputArraySize(
|
| 36 |
+
'pointer', None
|
| 37 |
+
)
|
| 38 |
+
glGetObjectBufferfvATI=wrapper.wrapper(glGetObjectBufferfvATI).setOutput(
|
| 39 |
+
'params',size=(1,),orPassIn=True
|
| 40 |
+
)
|
| 41 |
+
glGetObjectBufferivATI=wrapper.wrapper(glGetObjectBufferivATI).setOutput(
|
| 42 |
+
'params',size=(1,),orPassIn=True
|
| 43 |
+
)
|
| 44 |
+
glGetArrayObjectfvATI=wrapper.wrapper(glGetArrayObjectfvATI).setOutput(
|
| 45 |
+
'params',size=(1,),orPassIn=True
|
| 46 |
+
)
|
| 47 |
+
glGetArrayObjectivATI=wrapper.wrapper(glGetArrayObjectivATI).setOutput(
|
| 48 |
+
'params',size=(1,),orPassIn=True
|
| 49 |
+
)
|
| 50 |
+
glGetVariantArrayObjectfvATI=wrapper.wrapper(glGetVariantArrayObjectfvATI).setOutput(
|
| 51 |
+
'params',size=(1,),orPassIn=True
|
| 52 |
+
)
|
| 53 |
+
glGetVariantArrayObjectivATI=wrapper.wrapper(glGetVariantArrayObjectivATI).setOutput(
|
| 54 |
+
'params',size=(1,),orPassIn=True
|
| 55 |
+
)
|
| 56 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/vertex_attrib_array_object.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.vertex_attrib_array_object
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.vertex_attrib_array_object to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension defines an interface that allows multiple sets of
|
| 10 |
+
generic vertex attribute data to be cached in persistent server-side
|
| 11 |
+
memory. It is intended to allow client data to be stored in memory
|
| 12 |
+
that can be directly accessed by graphics hardware.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
The official definition of this extension is available here:
|
| 16 |
+
http://www.opengl.org/registry/specs/ATI/vertex_attrib_array_object.txt
|
| 17 |
+
'''
|
| 18 |
+
from OpenGL import platform, constant, arrays
|
| 19 |
+
from OpenGL import extensions, wrapper
|
| 20 |
+
import ctypes
|
| 21 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 22 |
+
from OpenGL.raw.GL.ATI.vertex_attrib_array_object import *
|
| 23 |
+
from OpenGL.raw.GL.ATI.vertex_attrib_array_object import _EXTENSION_NAME
|
| 24 |
+
|
| 25 |
+
def glInitVertexAttribArrayObjectATI():
|
| 26 |
+
'''Return boolean indicating whether this extension is available'''
|
| 27 |
+
from OpenGL import extensions
|
| 28 |
+
return extensions.hasGLExtension( _EXTENSION_NAME )
|
| 29 |
+
|
| 30 |
+
glGetVertexAttribArrayObjectfvATI=wrapper.wrapper(glGetVertexAttribArrayObjectfvATI).setOutput(
|
| 31 |
+
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
|
| 32 |
+
)
|
| 33 |
+
glGetVertexAttribArrayObjectivATI=wrapper.wrapper(glGetVertexAttribArrayObjectivATI).setOutput(
|
| 34 |
+
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
|
| 35 |
+
)
|
| 36 |
+
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/ATI/vertex_streams.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''OpenGL extension ATI.vertex_streams
|
| 2 |
+
|
| 3 |
+
This module customises the behaviour of the
|
| 4 |
+
OpenGL.raw.GL.ATI.vertex_streams to provide a more
|
| 5 |
+
Python-friendly API
|
| 6 |
+
|
| 7 |
+
Overview (from the spec)
|
| 8 |
+
|
| 9 |
+
This extension adds the ability to handle sets of auxilliary
|
| 10 |
+
vertex and normal coordinates. These sets of auxilliary
|
| 11 |
+
coordinates are termed streams, and can be routed selectively
|
| 12 |
+
into the blend stages provided by the vertex blending extension.
|
| 13 |
+
This functionality enables software animation techniques such
|
| 14 |
+
as keyframe vertex morphing.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
The official definition of this extension is available here:
|
| 19 |
+
http://www.opengl.org/registry/specs/ATI/vertex_streams.txt
|
| 20 |
+
'''
|
| 21 |
+
from OpenGL import platform, constant, arrays
|
| 22 |
+
from OpenGL import extensions, wrapper
|
| 23 |
+
import ctypes
|
| 24 |
+
from OpenGL.raw.GL import _types, _glgets
|
| 25 |
+
from OpenGL.raw.GL.ATI.vertex_streams import *
|
| 26 |
+
from OpenGL.raw.GL.ATI.vertex_streams import _EXTENSION_NAME
|
| 27 |
+
|
| 28 |
+
def glInitVertexStreamsATI():
    """Return True when the GL_ATI_vertex_streams extension is available.

    Must be called with a current GL context; queries the driver's
    extension string for this module's _EXTENSION_NAME.
    """
    from OpenGL import extensions as _extensions
    return _extensions.hasGLExtension(_EXTENSION_NAME)
|
| 32 |
+
|
| 33 |
+
# --- Autogenerated array-size annotations -----------------------------------
# Each raw entry point below takes a pointer to a fixed-size coordinate array.
# wrapper.wrapper(...).setInputArraySize('coords', N) rebinds the module-level
# name to a wrapped callable that accepts any N-element Python sequence for
# 'coords' and converts it to the required ctypes array automatically.
# The N in each function name (glVertexStream<N>...) matches the declared size;
# the normal-stream variants are always 3-component.

# 1-component vertex stream setters (short / int / float / double).
glVertexStream1svATI=wrapper.wrapper(glVertexStream1svATI).setInputArraySize(
    'coords', 1
)
glVertexStream1ivATI=wrapper.wrapper(glVertexStream1ivATI).setInputArraySize(
    'coords', 1
)
glVertexStream1fvATI=wrapper.wrapper(glVertexStream1fvATI).setInputArraySize(
    'coords', 1
)
glVertexStream1dvATI=wrapper.wrapper(glVertexStream1dvATI).setInputArraySize(
    'coords', 1
)
# 2-component vertex stream setters.
glVertexStream2svATI=wrapper.wrapper(glVertexStream2svATI).setInputArraySize(
    'coords', 2
)
glVertexStream2ivATI=wrapper.wrapper(glVertexStream2ivATI).setInputArraySize(
    'coords', 2
)
glVertexStream2fvATI=wrapper.wrapper(glVertexStream2fvATI).setInputArraySize(
    'coords', 2
)
glVertexStream2dvATI=wrapper.wrapper(glVertexStream2dvATI).setInputArraySize(
    'coords', 2
)
# 3-component vertex stream setters.
glVertexStream3svATI=wrapper.wrapper(glVertexStream3svATI).setInputArraySize(
    'coords', 3
)
glVertexStream3ivATI=wrapper.wrapper(glVertexStream3ivATI).setInputArraySize(
    'coords', 3
)
glVertexStream3fvATI=wrapper.wrapper(glVertexStream3fvATI).setInputArraySize(
    'coords', 3
)
glVertexStream3dvATI=wrapper.wrapper(glVertexStream3dvATI).setInputArraySize(
    'coords', 3
)
# 4-component vertex stream setters.
glVertexStream4svATI=wrapper.wrapper(glVertexStream4svATI).setInputArraySize(
    'coords', 4
)
glVertexStream4ivATI=wrapper.wrapper(glVertexStream4ivATI).setInputArraySize(
    'coords', 4
)
glVertexStream4fvATI=wrapper.wrapper(glVertexStream4fvATI).setInputArraySize(
    'coords', 4
)
glVertexStream4dvATI=wrapper.wrapper(glVertexStream4dvATI).setInputArraySize(
    'coords', 4
)
# Normal stream setters — normals are always 3 components
# (byte / short / int / float / double variants).
glNormalStream3bvATI=wrapper.wrapper(glNormalStream3bvATI).setInputArraySize(
    'coords', 3
)
glNormalStream3svATI=wrapper.wrapper(glNormalStream3svATI).setInputArraySize(
    'coords', 3
)
glNormalStream3ivATI=wrapper.wrapper(glNormalStream3ivATI).setInputArraySize(
    'coords', 3
)
glNormalStream3fvATI=wrapper.wrapper(glNormalStream3fvATI).setInputArraySize(
    'coords', 3
)
glNormalStream3dvATI=wrapper.wrapper(glNormalStream3dvATI).setInputArraySize(
    'coords', 3
)
### END AUTOGENERATED SECTION
|
vllm/lib/python3.10/site-packages/OpenGL/GL/KHR/__pycache__/texture_compression_astc_ldr.cpython-310.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (193 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/multisample.cpython-310.pyc
ADDED
|
Binary file (2.51 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/pixel_texture.cpython-310.pyc
ADDED
|
Binary file (3.66 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/point_line_texgen.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/sharpen_texture.cpython-310.pyc
ADDED
|
Binary file (1.52 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture4D.cpython-310.pyc
ADDED
|
Binary file (2.29 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture_border_clamp.cpython-310.pyc
ADDED
|
Binary file (1.91 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture_color_mask.cpython-310.pyc
ADDED
|
Binary file (1.59 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/OpenGL/GL/SGIS/__pycache__/texture_edge_clamp.cpython-310.pyc
ADDED
|
Binary file (2.07 kB). View file
|
|
|