content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
from __future__ import annotations\n\nfrom collections.abc import Iterable\nfrom typing import (\n TYPE_CHECKING,\n Literal,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\n\nfrom pandas.core.dtypes.common import (\n is_integer,\n is_list_like,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import PositionalIndexer\n\n from pandas import (\n DataFrame,\n Series,\n )\n from pandas.core.groupby import groupby\n\n\nclass GroupByIndexingMixin:\n """\n Mixin for adding ._positional_selector to GroupBy.\n """\n\n @cache_readonly\n def _positional_selector(self) -> GroupByPositionalSelector:\n """\n Return positional selection for each group.\n\n ``groupby._positional_selector[i:j]`` is similar to\n ``groupby.apply(lambda x: x.iloc[i:j])``\n but much faster and preserves the original index and order.\n\n ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head`\n and :meth:`~GroupBy.tail`. For example:\n\n - ``head(5)``\n - ``_positional_selector[5:-5]``\n - ``tail(5)``\n\n together return all the rows.\n\n Allowed inputs for the index are:\n\n - An integer valued iterable, e.g. ``range(2, 4)``.\n - A comma separated list of integers and slices, e.g. 
``5``, ``2, 4``, ``2:4``.\n\n The output format is the same as :meth:`~GroupBy.head` and\n :meth:`~GroupBy.tail`, namely\n a subset of the ``DataFrame`` or ``Series`` with the index and order preserved.\n\n Returns\n -------\n Series\n The filtered subset of the original Series.\n DataFrame\n The filtered subset of the original DataFrame.\n\n See Also\n --------\n DataFrame.iloc : Purely integer-location based indexing for selection by\n position.\n GroupBy.head : Return first n rows of each group.\n GroupBy.tail : Return last n rows of each group.\n GroupBy.nth : Take the nth row from each group if n is an int, or a\n subset of rows, if n is a list of ints.\n\n Notes\n -----\n - The slice step cannot be negative.\n - If the index specification results in overlaps, the item is not duplicated.\n - If the index specification changes the order of items, then\n they are returned in their original order.\n By contrast, ``DataFrame.iloc`` can change the row order.\n - ``groupby()`` parameters such as as_index and dropna are ignored.\n\n The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth`\n with ``as_index=False`` are:\n\n - Input to ``_positional_selector`` can include\n one or more slices whereas ``nth``\n just handles an integer or a list of integers.\n - ``_positional_selector`` can accept a slice relative to the\n last row of each group.\n - ``_positional_selector`` does not have an equivalent to the\n ``nth()`` ``dropna`` parameter.\n\n Examples\n --------\n >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]],\n ... 
columns=["A", "B"])\n >>> df.groupby("A")._positional_selector[1:2]\n A B\n 1 a 2\n 4 b 5\n\n >>> df.groupby("A")._positional_selector[1, -1]\n A B\n 1 a 2\n 2 a 3\n 4 b 5\n """\n if TYPE_CHECKING:\n # pylint: disable-next=used-before-assignment\n groupby_self = cast(groupby.GroupBy, self)\n else:\n groupby_self = self\n\n return GroupByPositionalSelector(groupby_self)\n\n def _make_mask_from_positional_indexer(\n self,\n arg: PositionalIndexer | tuple,\n ) -> np.ndarray:\n if is_list_like(arg):\n if all(is_integer(i) for i in cast(Iterable, arg)):\n mask = self._make_mask_from_list(cast(Iterable[int], arg))\n else:\n mask = self._make_mask_from_tuple(cast(tuple, arg))\n\n elif isinstance(arg, slice):\n mask = self._make_mask_from_slice(arg)\n elif is_integer(arg):\n mask = self._make_mask_from_int(cast(int, arg))\n else:\n raise TypeError(\n f"Invalid index {type(arg)}. "\n "Must be integer, list-like, slice or a tuple of "\n "integers and slices"\n )\n\n if isinstance(mask, bool):\n if mask:\n mask = self._ascending_count >= 0\n else:\n mask = self._ascending_count < 0\n\n return cast(np.ndarray, mask)\n\n def _make_mask_from_int(self, arg: int) -> np.ndarray:\n if arg >= 0:\n return self._ascending_count == arg\n else:\n return self._descending_count == (-arg - 1)\n\n def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray:\n positive = [arg for arg in args if arg >= 0]\n negative = [-arg - 1 for arg in args if arg < 0]\n\n mask: bool | np.ndarray = False\n\n if positive:\n mask |= np.isin(self._ascending_count, positive)\n\n if negative:\n mask |= np.isin(self._descending_count, negative)\n\n return mask\n\n def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray:\n mask: bool | np.ndarray = False\n\n for arg in args:\n if is_integer(arg):\n mask |= self._make_mask_from_int(cast(int, arg))\n elif isinstance(arg, slice):\n mask |= self._make_mask_from_slice(arg)\n else:\n raise ValueError(\n f"Invalid argument {type(arg)}. 
Should be int or slice."\n )\n\n return mask\n\n def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray:\n start = arg.start\n stop = arg.stop\n step = arg.step\n\n if step is not None and step < 0:\n raise ValueError(f"Invalid step {step}. Must be non-negative")\n\n mask: bool | np.ndarray = True\n\n if step is None:\n step = 1\n\n if start is None:\n if step > 1:\n mask &= self._ascending_count % step == 0\n\n elif start >= 0:\n mask &= self._ascending_count >= start\n\n if step > 1:\n mask &= (self._ascending_count - start) % step == 0\n\n else:\n mask &= self._descending_count < -start\n\n offset_array = self._descending_count + start + 1\n limit_array = (\n self._ascending_count + self._descending_count + (start + 1)\n ) < 0\n offset_array = np.where(limit_array, self._ascending_count, offset_array)\n\n mask &= offset_array % step == 0\n\n if stop is not None:\n if stop >= 0:\n mask &= self._ascending_count < stop\n else:\n mask &= self._descending_count >= -stop\n\n return mask\n\n @cache_readonly\n def _ascending_count(self) -> np.ndarray:\n if TYPE_CHECKING:\n groupby_self = cast(groupby.GroupBy, self)\n else:\n groupby_self = self\n\n return groupby_self._cumcount_array()\n\n @cache_readonly\n def _descending_count(self) -> np.ndarray:\n if TYPE_CHECKING:\n groupby_self = cast(groupby.GroupBy, self)\n else:\n groupby_self = self\n\n return groupby_self._cumcount_array(ascending=False)\n\n\n@doc(GroupByIndexingMixin._positional_selector)\nclass GroupByPositionalSelector:\n def __init__(self, groupby_object: groupby.GroupBy) -> None:\n self.groupby_object = groupby_object\n\n def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:\n """\n Select by positional index per group.\n\n Implements GroupBy._positional_selector\n\n Parameters\n ----------\n arg : PositionalIndexer | tuple\n Allowed values are:\n - int\n - int valued iterable such as list or range\n - slice with step either None or positive\n - tuple of integers and 
slices\n\n Returns\n -------\n Series\n The filtered subset of the original groupby Series.\n DataFrame\n The filtered subset of the original groupby DataFrame.\n\n See Also\n --------\n DataFrame.iloc : Integer-location based indexing for selection by position.\n GroupBy.head : Return first n rows of each group.\n GroupBy.tail : Return last n rows of each group.\n GroupBy._positional_selector : Return positional selection for each group.\n GroupBy.nth : Take the nth row from each group if n is an int, or a\n subset of rows, if n is a list of ints.\n """\n mask = self.groupby_object._make_mask_from_positional_indexer(arg)\n return self.groupby_object._mask_selected_obj(mask)\n\n\nclass GroupByNthSelector:\n """\n Dynamically substituted for GroupBy.nth to enable both call and index\n """\n\n def __init__(self, groupby_object: groupby.GroupBy) -> None:\n self.groupby_object = groupby_object\n\n def __call__(\n self,\n n: PositionalIndexer | tuple,\n dropna: Literal["any", "all", None] = None,\n ) -> DataFrame | Series:\n return self.groupby_object._nth(n, dropna)\n\n def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series:\n return self.groupby_object._nth(n)\n | .venv\Lib\site-packages\pandas\core\groupby\indexing.py | indexing.py | Python | 9,510 | 0.95 | 0.171053 | 0.004184 | vue-tools | 702 | 2025-05-31T15:23:40.026939 | BSD-3-Clause | false | bbfa358fa711690ca73abf5742334c4e |
"""Common utilities for Numba operations with groupby ops"""\nfrom __future__ import annotations\n\nimport functools\nimport inspect\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\n\nimport numpy as np\n\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.util.numba_ import (\n NumbaUtilError,\n jit_user_function,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import Scalar\n\n\ndef validate_udf(func: Callable) -> None:\n """\n Validate user defined function for ops when using Numba with groupby ops.\n\n The first signature arguments should include:\n\n def f(values, index, ...):\n ...\n\n Parameters\n ----------\n func : function, default False\n user defined function\n\n Returns\n -------\n None\n\n Raises\n ------\n NumbaUtilError\n """\n if not callable(func):\n raise NotImplementedError(\n "Numba engine can only be used with a single function."\n )\n udf_signature = list(inspect.signature(func).parameters.keys())\n expected_args = ["values", "index"]\n min_number_args = len(expected_args)\n if (\n len(udf_signature) < min_number_args\n or udf_signature[:min_number_args] != expected_args\n ):\n raise NumbaUtilError(\n f"The first {min_number_args} arguments to {func.__name__} must be "\n f"{expected_args}"\n )\n\n\n@functools.cache\ndef generate_numba_agg_func(\n func: Callable[..., Scalar],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:\n """\n Generate a numba jitted agg function specified by values from engine_kwargs.\n\n 1. jit the user's function\n 2. 
Return a groupby agg function with the jitted function inline\n\n Configurations specified in engine_kwargs apply to both the user's\n function _AND_ the groupby evaluation loop.\n\n Parameters\n ----------\n func : function\n function to be applied to each group and will be JITed\n nopython : bool\n nopython to be passed into numba.jit\n nogil : bool\n nogil to be passed into numba.jit\n parallel : bool\n parallel to be passed into numba.jit\n\n Returns\n -------\n Numba function\n """\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def group_agg(\n values: np.ndarray,\n index: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n num_columns: int,\n *args: Any,\n ) -> np.ndarray:\n assert len(begin) == len(end)\n num_groups = len(begin)\n\n result = np.empty((num_groups, num_columns))\n for i in numba.prange(num_groups):\n group_index = index[begin[i] : end[i]]\n for j in numba.prange(num_columns):\n group = values[begin[i] : end[i], j]\n result[i, j] = numba_func(group, group_index, *args)\n return result\n\n return group_agg\n\n\n@functools.cache\ndef generate_numba_transform_func(\n func: Callable[..., np.ndarray],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:\n """\n Generate a numba jitted transform function specified by values from engine_kwargs.\n\n 1. jit the user's function\n 2. 
Return a groupby transform function with the jitted function inline\n\n Configurations specified in engine_kwargs apply to both the user's\n function _AND_ the groupby evaluation loop.\n\n Parameters\n ----------\n func : function\n function to be applied to each window and will be JITed\n nopython : bool\n nopython to be passed into numba.jit\n nogil : bool\n nogil to be passed into numba.jit\n parallel : bool\n parallel to be passed into numba.jit\n\n Returns\n -------\n Numba function\n """\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency("numba")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def group_transform(\n values: np.ndarray,\n index: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n num_columns: int,\n *args: Any,\n ) -> np.ndarray:\n assert len(begin) == len(end)\n num_groups = len(begin)\n\n result = np.empty((len(values), num_columns))\n for i in numba.prange(num_groups):\n group_index = index[begin[i] : end[i]]\n for j in numba.prange(num_columns):\n group = values[begin[i] : end[i], j]\n result[begin[i] : end[i], j] = numba_func(group, group_index, *args)\n return result\n\n return group_transform\n | .venv\Lib\site-packages\pandas\core\groupby\numba_.py | numba_.py | Python | 4,894 | 0.85 | 0.20442 | 0.013245 | awesome-app | 833 | 2023-08-28T06:40:36.512751 | BSD-3-Clause | false | 8287d5a87b92b6853d298b618e7b575f |
"""\nProvide classes to perform the groupby aggregate operations.\n\nThese are not exposed to the user and provide implementations of the grouping\noperations, primarily in cython. These classes (BaseGrouper and BinGrouper)\nare contained *in* the SeriesGroupBy and DataFrameGroupBy objects.\n"""\nfrom __future__ import annotations\n\nimport collections\nimport functools\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Generic,\n final,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n lib,\n)\nimport pandas._libs.groupby as libgroupby\nfrom pandas._typing import (\n ArrayLike,\n AxisInt,\n NDFrameT,\n Shape,\n npt,\n)\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.cast import (\n maybe_cast_pointwise_result,\n maybe_downcast_to_dtype,\n)\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n ensure_int64,\n ensure_platform_int,\n ensure_uint64,\n is_1d_only_ea_dtype,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n maybe_fill,\n)\n\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.groupby import grouper\nfrom pandas.core.indexes.api import (\n CategoricalIndex,\n Index,\n MultiIndex,\n ensure_index,\n)\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import (\n compress_group_index,\n decons_obs_group_ids,\n get_flattened_list,\n get_group_index,\n get_group_index_sorter,\n get_indexer_dict,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterator,\n Sequence,\n )\n\n from pandas.core.generic import NDFrame\n\n\ndef check_result_array(obj, dtype) -> None:\n # Our operation is supposed to be an aggregation/reduction. If\n # it returns an ndarray, this likely means an invalid operation has\n # been passed. 
See test_apply_without_aggregation, test_agg_must_agg\n if isinstance(obj, np.ndarray):\n if dtype != object:\n # If it is object dtype, the function can be a reduction/aggregation\n # and still return an ndarray e.g. test_agg_over_numpy_arrays\n raise ValueError("Must produce aggregated value")\n\n\ndef extract_result(res):\n """\n Extract the result object, it might be a 0-dim ndarray\n or a len-1 0-dim, or a scalar\n """\n if hasattr(res, "_values"):\n # Preserve EA\n res = res._values\n if res.ndim == 1 and len(res) == 1:\n # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply\n res = res[0]\n return res\n\n\nclass WrappedCythonOp:\n """\n Dispatch logic for functions defined in _libs.groupby\n\n Parameters\n ----------\n kind: str\n Whether the operation is an aggregate or transform.\n how: str\n Operation name, e.g. "mean".\n has_dropped_na: bool\n True precisely when dropna=True and the grouper contains a null value.\n """\n\n # Functions for which we do _not_ attempt to cast the cython result\n # back to the original dtype.\n cast_blocklist = frozenset(\n ["any", "all", "rank", "count", "size", "idxmin", "idxmax"]\n )\n\n def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:\n self.kind = kind\n self.how = how\n self.has_dropped_na = has_dropped_na\n\n _CYTHON_FUNCTIONS: dict[str, dict] = {\n "aggregate": {\n "any": functools.partial(libgroupby.group_any_all, val_test="any"),\n "all": functools.partial(libgroupby.group_any_all, val_test="all"),\n "sum": "group_sum",\n "prod": "group_prod",\n "idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"),\n "idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"),\n "min": "group_min",\n "max": "group_max",\n "mean": "group_mean",\n "median": "group_median_float64",\n "var": "group_var",\n "std": functools.partial(libgroupby.group_var, name="std"),\n "sem": functools.partial(libgroupby.group_var, name="sem"),\n "skew": "group_skew",\n 
"first": "group_nth",\n "last": "group_last",\n "ohlc": "group_ohlc",\n },\n "transform": {\n "cumprod": "group_cumprod",\n "cumsum": "group_cumsum",\n "cummin": "group_cummin",\n "cummax": "group_cummax",\n "rank": "group_rank",\n },\n }\n\n _cython_arity = {"ohlc": 4} # OHLC\n\n @classmethod\n def get_kind_from_how(cls, how: str) -> str:\n if how in cls._CYTHON_FUNCTIONS["aggregate"]:\n return "aggregate"\n return "transform"\n\n # Note: we make this a classmethod and pass kind+how so that caching\n # works at the class level and not the instance level\n @classmethod\n @functools.cache\n def _get_cython_function(\n cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool\n ):\n dtype_str = dtype.name\n ftype = cls._CYTHON_FUNCTIONS[kind][how]\n\n # see if there is a fused-type version of function\n # only valid for numeric\n if callable(ftype):\n f = ftype\n else:\n f = getattr(libgroupby, ftype)\n if is_numeric:\n return f\n elif dtype == np.dtype(object):\n if how in ["median", "cumprod"]:\n # no fused types -> no __signatures__\n raise NotImplementedError(\n f"function is not implemented for this dtype: "\n f"[how->{how},dtype->{dtype_str}]"\n )\n elif how in ["std", "sem", "idxmin", "idxmax"]:\n # We have a partial object that does not have __signatures__\n return f\n elif how == "skew":\n # _get_cython_vals will convert to float64\n pass\n elif "object" not in f.__signatures__:\n # raise NotImplementedError here rather than TypeError later\n raise NotImplementedError(\n f"function is not implemented for this dtype: "\n f"[how->{how},dtype->{dtype_str}]"\n )\n return f\n else:\n raise NotImplementedError(\n "This should not be reached. 
Please report a bug at "\n "github.com/pandas-dev/pandas/",\n dtype,\n )\n\n def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:\n """\n Cast numeric dtypes to float64 for functions that only support that.\n\n Parameters\n ----------\n values : np.ndarray\n\n Returns\n -------\n values : np.ndarray\n """\n how = self.how\n\n if how in ["median", "std", "sem", "skew"]:\n # median only has a float64 implementation\n # We should only get here with is_numeric, as non-numeric cases\n # should raise in _get_cython_function\n values = ensure_float64(values)\n\n elif values.dtype.kind in "iu":\n if how in ["var", "mean"] or (\n self.kind == "transform" and self.has_dropped_na\n ):\n # has_dropped_na check need for test_null_group_str_transformer\n # result may still include NaN, so we have to cast\n values = ensure_float64(values)\n\n elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]:\n # Avoid overflow during group op\n if values.dtype.kind == "i":\n values = ensure_int64(values)\n else:\n values = ensure_uint64(values)\n\n return values\n\n def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:\n how = self.how\n kind = self.kind\n\n arity = self._cython_arity.get(how, 1)\n\n out_shape: Shape\n if how == "ohlc":\n out_shape = (ngroups, arity)\n elif arity > 1:\n raise NotImplementedError(\n "arity of more than 1 is not supported for the 'how' argument"\n )\n elif kind == "transform":\n out_shape = values.shape\n else:\n out_shape = (ngroups,) + values.shape[1:]\n return out_shape\n\n def _get_out_dtype(self, dtype: np.dtype) -> np.dtype:\n how = self.how\n\n if how == "rank":\n out_dtype = "float64"\n elif how in ["idxmin", "idxmax"]:\n # The Cython implementation only produces the row number; we'll take\n # from the index using this in post processing\n out_dtype = "intp"\n else:\n if dtype.kind in "iufcb":\n out_dtype = f"{dtype.kind}{dtype.itemsize}"\n else:\n out_dtype = "object"\n return np.dtype(out_dtype)\n\n def 
_get_result_dtype(self, dtype: np.dtype) -> np.dtype:\n """\n Get the desired dtype of a result based on the\n input dtype and how it was computed.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n np.dtype\n The desired dtype of the result.\n """\n how = self.how\n\n if how in ["sum", "cumsum", "sum", "prod", "cumprod"]:\n if dtype == np.dtype(bool):\n return np.dtype(np.int64)\n elif how in ["mean", "median", "var", "std", "sem"]:\n if dtype.kind in "fc":\n return dtype\n elif dtype.kind in "iub":\n return np.dtype(np.float64)\n return dtype\n\n @final\n def _cython_op_ndim_compat(\n self,\n values: np.ndarray,\n *,\n min_count: int,\n ngroups: int,\n comp_ids: np.ndarray,\n mask: npt.NDArray[np.bool_] | None = None,\n result_mask: npt.NDArray[np.bool_] | None = None,\n **kwargs,\n ) -> np.ndarray:\n if values.ndim == 1:\n # expand to 2d, dispatch, then squeeze if appropriate\n values2d = values[None, :]\n if mask is not None:\n mask = mask[None, :]\n if result_mask is not None:\n result_mask = result_mask[None, :]\n res = self._call_cython_op(\n values2d,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n if res.shape[0] == 1:\n return res[0]\n\n # otherwise we have OHLC\n return res.T\n\n return self._call_cython_op(\n values,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n\n @final\n def _call_cython_op(\n self,\n values: np.ndarray, # np.ndarray[ndim=2]\n *,\n min_count: int,\n ngroups: int,\n comp_ids: np.ndarray,\n mask: npt.NDArray[np.bool_] | None,\n result_mask: npt.NDArray[np.bool_] | None,\n **kwargs,\n ) -> np.ndarray: # np.ndarray[ndim=2]\n orig_values = values\n\n dtype = values.dtype\n is_numeric = dtype.kind in "iufcb"\n\n is_datetimelike = dtype.kind in "mM"\n\n if is_datetimelike:\n values = values.view("int64")\n is_numeric = True\n elif dtype.kind == "b":\n values = 
values.view("uint8")\n if values.dtype == "float16":\n values = values.astype(np.float32)\n\n if self.how in ["any", "all"]:\n if mask is None:\n mask = isna(values)\n if dtype == object:\n if kwargs["skipna"]:\n # GH#37501: don't raise on pd.NA when skipna=True\n if mask.any():\n # mask on original values computed separately\n values = values.copy()\n values[mask] = True\n values = values.astype(bool, copy=False).view(np.int8)\n is_numeric = True\n\n values = values.T\n if mask is not None:\n mask = mask.T\n if result_mask is not None:\n result_mask = result_mask.T\n\n out_shape = self._get_output_shape(ngroups, values)\n func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)\n values = self._get_cython_vals(values)\n out_dtype = self._get_out_dtype(values.dtype)\n\n result = maybe_fill(np.empty(out_shape, dtype=out_dtype))\n if self.kind == "aggregate":\n counts = np.zeros(ngroups, dtype=np.int64)\n if self.how in [\n "idxmin",\n "idxmax",\n "min",\n "max",\n "mean",\n "last",\n "first",\n "sum",\n ]:\n func(\n out=result,\n counts=counts,\n values=values,\n labels=comp_ids,\n min_count=min_count,\n mask=mask,\n result_mask=result_mask,\n is_datetimelike=is_datetimelike,\n **kwargs,\n )\n elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]:\n if self.how in ["std", "sem"]:\n kwargs["is_datetimelike"] = is_datetimelike\n func(\n result,\n counts,\n values,\n comp_ids,\n min_count=min_count,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n elif self.how in ["any", "all"]:\n func(\n out=result,\n values=values,\n labels=comp_ids,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n result = result.astype(bool, copy=False)\n elif self.how in ["skew"]:\n func(\n out=result,\n counts=counts,\n values=values,\n labels=comp_ids,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n if dtype == object:\n result = result.astype(object)\n\n else:\n raise NotImplementedError(f"{self.how} is not implemented")\n else:\n # 
TODO: min_count\n if self.how != "rank":\n # TODO: should rank take result_mask?\n kwargs["result_mask"] = result_mask\n func(\n out=result,\n values=values,\n labels=comp_ids,\n ngroups=ngroups,\n is_datetimelike=is_datetimelike,\n mask=mask,\n **kwargs,\n )\n\n if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]:\n # i.e. counts is defined. Locations where count<min_count\n # need to have the result set to np.nan, which may require casting,\n # see GH#40767. For idxmin/idxmax is handled specially via post-processing\n if result.dtype.kind in "iu" and not is_datetimelike:\n # if the op keeps the int dtypes, we have to use 0\n cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count)\n empty_groups = counts < cutoff\n if empty_groups.any():\n if result_mask is not None:\n assert result_mask[empty_groups].all()\n else:\n # Note: this conversion could be lossy, see GH#40767\n result = result.astype("float64")\n result[empty_groups] = np.nan\n\n result = result.T\n\n if self.how not in self.cast_blocklist:\n # e.g. 
if we are int64 and need to restore to datetime64/timedelta64\n # "rank" is the only member of cast_blocklist we get here\n # Casting only needed for float16, bool, datetimelike,\n # and self.how in ["sum", "prod", "ohlc", "cumprod"]\n res_dtype = self._get_result_dtype(orig_values.dtype)\n op_result = maybe_downcast_to_dtype(result, res_dtype)\n else:\n op_result = result\n\n return op_result\n\n @final\n def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None:\n if values.ndim > 2:\n raise NotImplementedError("number of dimensions is currently limited to 2")\n if values.ndim == 2:\n assert axis == 1, axis\n elif not is_1d_only_ea_dtype(values.dtype):\n # Note: it is *not* the case that axis is always 0 for 1-dim values,\n # as we can have 1D ExtensionArrays that we need to treat as 2D\n assert axis == 0\n\n @final\n def cython_operation(\n self,\n *,\n values: ArrayLike,\n axis: AxisInt,\n min_count: int = -1,\n comp_ids: np.ndarray,\n ngroups: int,\n **kwargs,\n ) -> ArrayLike:\n """\n Call our cython function, with appropriate pre- and post- processing.\n """\n self._validate_axis(axis, values)\n\n if not isinstance(values, np.ndarray):\n # i.e. 
ExtensionArray\n return values._groupby_op(\n how=self.how,\n has_dropped_na=self.has_dropped_na,\n min_count=min_count,\n ngroups=ngroups,\n ids=comp_ids,\n **kwargs,\n )\n\n return self._cython_op_ndim_compat(\n values,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n mask=None,\n **kwargs,\n )\n\n\nclass BaseGrouper:\n """\n This is an internal Grouper class, which actually holds\n the generated groups\n\n Parameters\n ----------\n axis : Index\n groupings : Sequence[Grouping]\n all the grouping instances to handle in this grouper\n for example for grouper list to groupby, need to pass the list\n sort : bool, default True\n whether this grouper will give sorted result or not\n\n """\n\n axis: Index\n\n def __init__(\n self,\n axis: Index,\n groupings: Sequence[grouper.Grouping],\n sort: bool = True,\n dropna: bool = True,\n ) -> None:\n assert isinstance(axis, Index), axis\n\n self.axis = axis\n self._groupings: list[grouper.Grouping] = list(groupings)\n self._sort = sort\n self.dropna = dropna\n\n @property\n def groupings(self) -> list[grouper.Grouping]:\n return self._groupings\n\n @property\n def shape(self) -> Shape:\n return tuple(ping.ngroups for ping in self.groupings)\n\n def __iter__(self) -> Iterator[Hashable]:\n return iter(self.indices)\n\n @property\n def nkeys(self) -> int:\n return len(self.groupings)\n\n def get_iterator(\n self, data: NDFrameT, axis: AxisInt = 0\n ) -> Iterator[tuple[Hashable, NDFrameT]]:\n """\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n """\n splitter = self._get_splitter(data, axis=axis)\n keys = self.group_keys_seq\n yield from zip(keys, splitter)\n\n @final\n def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:\n """\n Returns\n -------\n Generator yielding subsetted objects\n """\n ids, _, ngroups = self.group_info\n return _get_splitter(\n data,\n ids,\n ngroups,\n sorted_ids=self._sorted_ids,\n 
sort_idx=self._sort_idx,\n axis=axis,\n )\n\n @final\n @cache_readonly\n def group_keys_seq(self):\n if len(self.groupings) == 1:\n return self.levels[0]\n else:\n ids, _, ngroups = self.group_info\n\n # provide "flattened" iterator for multi-group setting\n return get_flattened_list(ids, ngroups, self.levels, self.codes)\n\n @cache_readonly\n def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n """dict {group name -> group indices}"""\n if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):\n # This shows unused categories in indices GH#38642\n return self.groupings[0].indices\n codes_list = [ping.codes for ping in self.groupings]\n keys = [ping._group_index for ping in self.groupings]\n return get_indexer_dict(codes_list, keys)\n\n @final\n def result_ilocs(self) -> npt.NDArray[np.intp]:\n """\n Get the original integer locations of result_index in the input.\n """\n # Original indices are where group_index would go via sorting.\n # But when dropna is true, we need to remove null values while accounting for\n # any gaps that then occur because of them.\n group_index = get_group_index(\n self.codes, self.shape, sort=self._sort, xnull=True\n )\n group_index, _ = compress_group_index(group_index, sort=self._sort)\n\n if self.has_dropped_na:\n mask = np.where(group_index >= 0)\n # Count how many gaps are caused by previous null values for each position\n null_gaps = np.cumsum(group_index == -1)[mask]\n group_index = group_index[mask]\n\n result = get_group_index_sorter(group_index, self.ngroups)\n\n if self.has_dropped_na:\n # Shift by the number of prior null gaps\n result += np.take(null_gaps, result)\n\n return result\n\n @final\n @property\n def codes(self) -> list[npt.NDArray[np.signedinteger]]:\n return [ping.codes for ping in self.groupings]\n\n @property\n def levels(self) -> list[Index]:\n return [ping._group_index for ping in self.groupings]\n\n @property\n def names(self) -> list[Hashable]:\n return [ping.name for ping in 
self.groupings]\n\n @final\n def size(self) -> Series:\n """\n Compute group sizes.\n """\n ids, _, ngroups = self.group_info\n out: np.ndarray | list\n if ngroups:\n out = np.bincount(ids[ids != -1], minlength=ngroups)\n else:\n out = []\n return Series(out, index=self.result_index, dtype="int64", copy=False)\n\n @cache_readonly\n def groups(self) -> dict[Hashable, np.ndarray]:\n """dict {group name -> group labels}"""\n if len(self.groupings) == 1:\n return self.groupings[0].groups\n else:\n to_groupby = []\n for ping in self.groupings:\n gv = ping.grouping_vector\n if not isinstance(gv, BaseGrouper):\n to_groupby.append(gv)\n else:\n to_groupby.append(gv.groupings[0].grouping_vector)\n index = MultiIndex.from_arrays(to_groupby)\n return self.axis.groupby(index)\n\n @final\n @cache_readonly\n def is_monotonic(self) -> bool:\n # return if my group orderings are monotonic\n return Index(self.group_info[0]).is_monotonic_increasing\n\n @final\n @cache_readonly\n def has_dropped_na(self) -> bool:\n """\n Whether grouper has null value(s) that are dropped.\n """\n return bool((self.group_info[0] < 0).any())\n\n @cache_readonly\n def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:\n comp_ids, obs_group_ids = self._get_compressed_codes()\n\n ngroups = len(obs_group_ids)\n comp_ids = ensure_platform_int(comp_ids)\n\n return comp_ids, obs_group_ids, ngroups\n\n @cache_readonly\n def codes_info(self) -> npt.NDArray[np.intp]:\n # return the codes of items in original grouped axis\n ids, _, _ = self.group_info\n return ids\n\n @final\n def _get_compressed_codes(\n self,\n ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]:\n # The first returned ndarray may have any signed integer dtype\n if len(self.groupings) > 1:\n group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)\n return compress_group_index(group_index, sort=self._sort)\n # FIXME: compress_group_index's second return value is int64, not intp\n\n ping = 
self.groupings[0]\n return ping.codes, np.arange(len(ping._group_index), dtype=np.intp)\n\n @final\n @cache_readonly\n def ngroups(self) -> int:\n return len(self.result_index)\n\n @property\n def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:\n codes = self.codes\n ids, obs_ids, _ = self.group_info\n return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)\n\n @cache_readonly\n def result_index(self) -> Index:\n if len(self.groupings) == 1:\n return self.groupings[0]._result_index.rename(self.names[0])\n\n codes = self.reconstructed_codes\n levels = [ping._result_index for ping in self.groupings]\n return MultiIndex(\n levels=levels, codes=codes, verify_integrity=False, names=self.names\n )\n\n @final\n def get_group_levels(self) -> list[ArrayLike]:\n # Note: only called from _insert_inaxis_grouper, which\n # is only called for BaseGrouper, never for BinGrouper\n if len(self.groupings) == 1:\n return [self.groupings[0]._group_arraylike]\n\n name_list = []\n for ping, codes in zip(self.groupings, self.reconstructed_codes):\n codes = ensure_platform_int(codes)\n levels = ping._group_arraylike.take(codes)\n\n name_list.append(levels)\n\n return name_list\n\n # ------------------------------------------------------------\n # Aggregation functions\n\n @final\n def _cython_operation(\n self,\n kind: str,\n values,\n how: str,\n axis: AxisInt,\n min_count: int = -1,\n **kwargs,\n ) -> ArrayLike:\n """\n Returns the values of a cython operation.\n """\n assert kind in ["transform", "aggregate"]\n\n cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na)\n\n ids, _, _ = self.group_info\n ngroups = self.ngroups\n return cy_op.cython_operation(\n values=values,\n axis=axis,\n min_count=min_count,\n comp_ids=ids,\n ngroups=ngroups,\n **kwargs,\n )\n\n @final\n def agg_series(\n self, obj: Series, func: Callable, preserve_dtype: bool = False\n ) -> ArrayLike:\n """\n Parameters\n ----------\n obj : Series\n func : function 
taking a Series and returning a scalar-like\n preserve_dtype : bool\n Whether the aggregation is known to be dtype-preserving.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n """\n\n if not isinstance(obj._values, np.ndarray):\n # we can preserve a little bit more aggressively with EA dtype\n # because maybe_cast_pointwise_result will do a try/except\n # with _from_sequence. NB we are assuming here that _from_sequence\n # is sufficiently strict that it casts appropriately.\n preserve_dtype = True\n\n result = self._aggregate_series_pure_python(obj, func)\n\n npvalues = lib.maybe_convert_objects(result, try_float=False)\n if preserve_dtype:\n out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)\n else:\n out = npvalues\n return out\n\n @final\n def _aggregate_series_pure_python(\n self, obj: Series, func: Callable\n ) -> npt.NDArray[np.object_]:\n _, _, ngroups = self.group_info\n\n result = np.empty(ngroups, dtype="O")\n initialized = False\n\n splitter = self._get_splitter(obj, axis=0)\n\n for i, group in enumerate(splitter):\n res = func(group)\n res = extract_result(res)\n\n if not initialized:\n # We only do this validation on the first iteration\n check_result_array(res, group.dtype)\n initialized = True\n\n result[i] = res\n\n return result\n\n @final\n def apply_groupwise(\n self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0\n ) -> tuple[list, bool]:\n mutated = False\n splitter = self._get_splitter(data, axis=axis)\n group_keys = self.group_keys_seq\n result_values = []\n\n # This calls DataSplitter.__iter__\n zipped = zip(group_keys, splitter)\n\n for key, group in zipped:\n # Pinning name is needed for\n # test_group_apply_once_per_group,\n # test_inconsistent_return_type, test_set_group_name,\n # test_group_name_available_in_inference_pass,\n # test_groupby_multi_timezone\n object.__setattr__(group, "name", key)\n\n # group might be modified\n group_axes = group.axes\n res = f(group)\n if not mutated and not 
_is_indexed_like(res, group_axes, axis):\n mutated = True\n result_values.append(res)\n # getattr pattern for __name__ is needed for functools.partial objects\n if len(group_keys) == 0 and getattr(f, "__name__", None) in [\n "skew",\n "sum",\n "prod",\n ]:\n # If group_keys is empty, then no function calls have been made,\n # so we will not have raised even if this is an invalid dtype.\n # So do one dummy call here to raise appropriate TypeError.\n f(data.iloc[:0])\n\n return result_values, mutated\n\n # ------------------------------------------------------------\n # Methods for sorting subsets of our GroupBy's object\n\n @final\n @cache_readonly\n def _sort_idx(self) -> npt.NDArray[np.intp]:\n # Counting sort indexer\n ids, _, ngroups = self.group_info\n return get_group_index_sorter(ids, ngroups)\n\n @final\n @cache_readonly\n def _sorted_ids(self) -> npt.NDArray[np.intp]:\n ids, _, _ = self.group_info\n return ids.take(self._sort_idx)\n\n\nclass BinGrouper(BaseGrouper):\n """\n This is an internal Grouper class\n\n Parameters\n ----------\n bins : the split index of binlabels to group the item of axis\n binlabels : the label list\n indexer : np.ndarray[np.intp], optional\n the indexer created by Grouper\n some groupers (TimeGrouper) will sort its axis and its\n group_info is also sorted, so need the indexer to reorder\n\n Examples\n --------\n bins: [2, 4, 6, 8, 10]\n binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',\n '2005-01-05', '2005-01-07', '2005-01-09'],\n dtype='datetime64[ns]', freq='2D')\n\n the group_info, which contains the label of each item in grouped\n axis, the index of label in label list, group number, is\n\n (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)\n\n means that, the grouped axis has 10 items, can be grouped into 5\n labels, the first and second items belong to the first label, the\n third and forth items belong to the second label, and so on\n\n """\n\n bins: npt.NDArray[np.int64]\n binlabels: Index\n\n def 
__init__(\n self,\n bins,\n binlabels,\n indexer=None,\n ) -> None:\n self.bins = ensure_int64(bins)\n self.binlabels = ensure_index(binlabels)\n self.indexer = indexer\n\n # These lengths must match, otherwise we could call agg_series\n # with empty self.bins, which would raise later.\n assert len(self.binlabels) == len(self.bins)\n\n @cache_readonly\n def groups(self):\n """dict {group name -> group labels}"""\n # this is mainly for compat\n # GH 3881\n result = {\n key: value\n for key, value in zip(self.binlabels, self.bins)\n if key is not NaT\n }\n return result\n\n @property\n def nkeys(self) -> int:\n # still matches len(self.groupings), but we can hard-code\n return 1\n\n @cache_readonly\n def codes_info(self) -> npt.NDArray[np.intp]:\n # return the codes of items in original grouped axis\n ids, _, _ = self.group_info\n if self.indexer is not None:\n sorter = np.lexsort((ids, self.indexer))\n ids = ids[sorter]\n return ids\n\n def get_iterator(self, data: NDFrame, axis: AxisInt = 0):\n """\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n """\n if axis == 0:\n slicer = lambda start, edge: data.iloc[start:edge]\n else:\n slicer = lambda start, edge: data.iloc[:, start:edge]\n\n length = len(data.axes[axis])\n\n start = 0\n for edge, label in zip(self.bins, self.binlabels):\n if label is not NaT:\n yield label, slicer(start, edge)\n start = edge\n\n if start < length:\n yield self.binlabels[-1], slicer(start, None)\n\n @cache_readonly\n def indices(self):\n indices = collections.defaultdict(list)\n\n i = 0\n for label, bin in zip(self.binlabels, self.bins):\n if i < bin:\n if label is not NaT:\n indices[label] = list(range(i, bin))\n i = bin\n return indices\n\n @cache_readonly\n def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:\n ngroups = self.ngroups\n obs_group_ids = np.arange(ngroups, dtype=np.intp)\n rep = np.diff(np.r_[0, self.bins])\n\n rep = 
ensure_platform_int(rep)\n if ngroups == len(self.bins):\n comp_ids = np.repeat(np.arange(ngroups), rep)\n else:\n comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)\n\n return (\n ensure_platform_int(comp_ids),\n obs_group_ids,\n ngroups,\n )\n\n @cache_readonly\n def reconstructed_codes(self) -> list[np.ndarray]:\n # get unique result indices, and prepend 0 as groupby starts from the first\n return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]\n\n @cache_readonly\n def result_index(self) -> Index:\n if len(self.binlabels) != 0 and isna(self.binlabels[0]):\n return self.binlabels[1:]\n\n return self.binlabels\n\n @property\n def levels(self) -> list[Index]:\n return [self.binlabels]\n\n @property\n def names(self) -> list[Hashable]:\n return [self.binlabels.name]\n\n @property\n def groupings(self) -> list[grouper.Grouping]:\n lev = self.binlabels\n codes = self.group_info[0]\n labels = lev.take(codes)\n ping = grouper.Grouping(\n labels, labels, in_axis=False, level=None, uniques=lev._values\n )\n return [ping]\n\n\ndef _is_indexed_like(obj, axes, axis: AxisInt) -> bool:\n if isinstance(obj, Series):\n if len(axes) > 1:\n return False\n return obj.axes[axis].equals(axes[axis])\n elif isinstance(obj, DataFrame):\n return obj.axes[axis].equals(axes[axis])\n\n return False\n\n\n# ----------------------------------------------------------------------\n# Splitting / application\n\n\nclass DataSplitter(Generic[NDFrameT]):\n def __init__(\n self,\n data: NDFrameT,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n *,\n sort_idx: npt.NDArray[np.intp],\n sorted_ids: npt.NDArray[np.intp],\n axis: AxisInt = 0,\n ) -> None:\n self.data = data\n self.labels = ensure_platform_int(labels) # _should_ already be np.intp\n self.ngroups = ngroups\n\n self._slabels = sorted_ids\n self._sort_idx = sort_idx\n\n self.axis = axis\n assert isinstance(axis, int), axis\n\n def __iter__(self) -> Iterator:\n sdata = self._sorted_data\n\n if self.ngroups == 0:\n # we 
are inside a generator, rather than raise StopIteration\n # we merely return signal the end\n return\n\n starts, ends = lib.generate_slices(self._slabels, self.ngroups)\n\n for start, end in zip(starts, ends):\n yield self._chop(sdata, slice(start, end))\n\n @cache_readonly\n def _sorted_data(self) -> NDFrameT:\n return self.data.take(self._sort_idx, axis=self.axis)\n\n def _chop(self, sdata, slice_obj: slice) -> NDFrame:\n raise AbstractMethodError(self)\n\n\nclass SeriesSplitter(DataSplitter):\n def _chop(self, sdata: Series, slice_obj: slice) -> Series:\n # fastpath equivalent to `sdata.iloc[slice_obj]`\n mgr = sdata._mgr.get_slice(slice_obj)\n ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes)\n ser._name = sdata.name\n return ser.__finalize__(sdata, method="groupby")\n\n\nclass FrameSplitter(DataSplitter):\n def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:\n # Fastpath equivalent to:\n # if self.axis == 0:\n # return sdata.iloc[slice_obj]\n # else:\n # return sdata.iloc[:, slice_obj]\n mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)\n df = sdata._constructor_from_mgr(mgr, axes=mgr.axes)\n return df.__finalize__(sdata, method="groupby")\n\n\ndef _get_splitter(\n data: NDFrame,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n *,\n sort_idx: npt.NDArray[np.intp],\n sorted_ids: npt.NDArray[np.intp],\n axis: AxisInt = 0,\n) -> DataSplitter:\n if isinstance(data, Series):\n klass: type[DataSplitter] = SeriesSplitter\n else:\n # i.e. DataFrame\n klass = FrameSplitter\n\n return klass(\n data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis\n )\n | .venv\Lib\site-packages\pandas\core\groupby\ops.py | ops.py | Python | 38,234 | 0.95 | 0.165563 | 0.110572 | node-utils | 318 | 2024-11-19T15:32:21.848269 | GPL-3.0 | false | 794d7acafc067eb02fc0cbab58e8825b |
from pandas.core.groupby.generic import (\n DataFrameGroupBy,\n NamedAgg,\n SeriesGroupBy,\n)\nfrom pandas.core.groupby.groupby import GroupBy\nfrom pandas.core.groupby.grouper import Grouper\n\n__all__ = [\n "DataFrameGroupBy",\n "NamedAgg",\n "SeriesGroupBy",\n "GroupBy",\n "Grouper",\n]\n | .venv\Lib\site-packages\pandas\core\groupby\__init__.py | __init__.py | Python | 301 | 0.85 | 0 | 0 | awesome-app | 658 | 2023-11-02T19:26:15.239607 | MIT | false | 945b06dfcabc30a4ff3999d69a5f1cd1 |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\base.cpython-313.pyc | base.cpython-313.pyc | Other | 1,703 | 0.8 | 0.045455 | 0 | node-utils | 908 | 2025-03-13T01:20:11.522009 | Apache-2.0 | false | 4767741e241b2dbcd91afb5efd0b28f0 |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\categorical.cpython-313.pyc | categorical.cpython-313.pyc | Other | 3,119 | 0.8 | 0.039216 | 0 | vue-tools | 66 | 2024-04-27T08:13:57.951078 | MIT | false | 52810021b871f2d7ed85b3e75c29545d |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\generic.cpython-313.pyc | generic.cpython-313.pyc | Other | 98,129 | 0.75 | 0.049972 | 0.004467 | python-kit | 283 | 2024-01-01T23:42:27.156758 | BSD-3-Clause | false | f1b7a981bf85167d4be94b5ac048cd40 |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\grouper.cpython-313.pyc | grouper.cpython-313.pyc | Other | 39,851 | 0.95 | 0.03352 | 0 | node-utils | 179 | 2024-12-14T11:37:48.015664 | MIT | false | 1dfe6726ce2050a91114122d4f29a325 |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\indexing.cpython-313.pyc | indexing.cpython-313.pyc | Other | 11,823 | 0.8 | 0.067485 | 0.013889 | python-kit | 802 | 2024-07-03T10:04:55.955083 | MIT | false | c73a69724f0f3bea3474ea63ba054804 |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\numba_.cpython-313.pyc | numba_.cpython-313.pyc | Other | 5,939 | 0.95 | 0.185484 | 0.009174 | vue-tools | 707 | 2024-05-21T18:56:27.917736 | GPL-3.0 | false | 1fac5b6db509269b9b7b29a53df11ee5 |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\ops.cpython-313.pyc | ops.cpython-313.pyc | Other | 43,589 | 0.95 | 0.022088 | 0 | node-utils | 763 | 2023-10-15T23:04:20.647263 | GPL-3.0 | false | d438ae607036ebc89c2d385b03932465 |
\n\n | .venv\Lib\site-packages\pandas\core\groupby\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 488 | 0.7 | 0 | 0 | vue-tools | 463 | 2024-01-20T02:38:43.010482 | Apache-2.0 | false | c1d698389af5ad8cb44fcf2fbf135fef |
"""Indexer objects for computing start/end window bounds for rolling operations"""\nfrom __future__ import annotations\n\nfrom datetime import timedelta\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import BaseOffset\nfrom pandas._libs.window.indexers import calculate_variable_window_bounds\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.common import ensure_platform_int\n\nfrom pandas.core.indexes.datetimes import DatetimeIndex\n\nfrom pandas.tseries.offsets import Nano\n\nget_window_bounds_doc = """\nComputes the bounds of a window.\n\nParameters\n----------\nnum_values : int, default 0\n number of values that will be aggregated over\nwindow_size : int, default 0\n the number of rows in a window\nmin_periods : int, default None\n min_periods passed from the top level rolling API\ncenter : bool, default None\n center passed from the top level rolling API\nclosed : str, default None\n closed passed from the top level rolling API\nstep : int, default None\n step passed from the top level rolling API\n .. versionadded:: 1.5\nwin_type : str, default None\n win_type passed from the top level rolling API\n\nReturns\n-------\nA tuple of ndarray[int64]s, indicating the boundaries of each\nwindow\n"""\n\n\nclass BaseIndexer:\n """\n Base class for window bounds calculations.\n\n Examples\n --------\n >>> from pandas.api.indexers import BaseIndexer\n >>> class CustomIndexer(BaseIndexer):\n ... def get_window_bounds(self, num_values, min_periods, center, closed, step):\n ... start = np.empty(num_values, dtype=np.int64)\n ... end = np.empty(num_values, dtype=np.int64)\n ... for i in range(num_values):\n ... start[i] = i\n ... end[i] = i + self.window_size\n ... 
return start, end\n >>> df = pd.DataFrame({"values": range(5)})\n >>> indexer = CustomIndexer(window_size=2)\n >>> df.rolling(indexer).sum()\n values\n 0 1.0\n 1 3.0\n 2 5.0\n 3 7.0\n 4 4.0\n """\n\n def __init__(\n self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs\n ) -> None:\n self.index_array = index_array\n self.window_size = window_size\n # Set user defined kwargs as attributes that can be used in get_window_bounds\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n raise NotImplementedError\n\n\nclass FixedWindowIndexer(BaseIndexer):\n """Creates window boundaries that are of fixed length."""\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n if center or self.window_size == 0:\n offset = (self.window_size - 1) // 2\n else:\n offset = 0\n\n end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64")\n start = end - self.window_size\n if closed in ["left", "both"]:\n start -= 1\n if closed in ["left", "neither"]:\n end -= 1\n\n end = np.clip(end, 0, num_values)\n start = np.clip(start, 0, num_values)\n\n return start, end\n\n\nclass VariableWindowIndexer(BaseIndexer):\n """Creates window boundaries that are of variable length, namely for time series."""\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n # error: Argument 4 to "calculate_variable_window_bounds" has 
incompatible\n # type "Optional[bool]"; expected "bool"\n # error: Argument 6 to "calculate_variable_window_bounds" has incompatible\n # type "Optional[ndarray]"; expected "ndarray"\n return calculate_variable_window_bounds(\n num_values,\n self.window_size,\n min_periods,\n center, # type: ignore[arg-type]\n closed,\n self.index_array, # type: ignore[arg-type]\n )\n\n\nclass VariableOffsetWindowIndexer(BaseIndexer):\n """\n Calculate window boundaries based on a non-fixed offset such as a BusinessDay.\n\n Examples\n --------\n >>> from pandas.api.indexers import VariableOffsetWindowIndexer\n >>> df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))\n >>> offset = pd.offsets.BDay(1)\n >>> indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)\n >>> df\n 0\n 2020-01-01 0\n 2020-01-02 1\n 2020-01-03 2\n 2020-01-04 3\n 2020-01-05 4\n 2020-01-06 5\n 2020-01-07 6\n 2020-01-08 7\n 2020-01-09 8\n 2020-01-10 9\n >>> df.rolling(indexer).sum()\n 0\n 2020-01-01 0.0\n 2020-01-02 1.0\n 2020-01-03 2.0\n 2020-01-04 3.0\n 2020-01-05 7.0\n 2020-01-06 12.0\n 2020-01-07 6.0\n 2020-01-08 7.0\n 2020-01-09 8.0\n 2020-01-10 9.0\n """\n\n def __init__(\n self,\n index_array: np.ndarray | None = None,\n window_size: int = 0,\n index: DatetimeIndex | None = None,\n offset: BaseOffset | None = None,\n **kwargs,\n ) -> None:\n super().__init__(index_array, window_size, **kwargs)\n if not isinstance(index, DatetimeIndex):\n raise ValueError("index must be a DatetimeIndex.")\n self.index = index\n if not isinstance(offset, BaseOffset):\n raise ValueError("offset must be a DateOffset-like object.")\n self.offset = offset\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n if step is not None:\n raise NotImplementedError("step not implemented for variable offset 
window")\n if num_values <= 0:\n return np.empty(0, dtype="int64"), np.empty(0, dtype="int64")\n\n # if windows is variable, default is 'right', otherwise default is 'both'\n if closed is None:\n closed = "right" if self.index is not None else "both"\n\n right_closed = closed in ["right", "both"]\n left_closed = closed in ["left", "both"]\n\n if self.index[num_values - 1] < self.index[0]:\n index_growth_sign = -1\n else:\n index_growth_sign = 1\n offset_diff = index_growth_sign * self.offset\n\n start = np.empty(num_values, dtype="int64")\n start.fill(-1)\n end = np.empty(num_values, dtype="int64")\n end.fill(-1)\n\n start[0] = 0\n\n # right endpoint is closed\n if right_closed:\n end[0] = 1\n # right endpoint is open\n else:\n end[0] = 0\n\n zero = timedelta(0)\n # start is start of slice interval (including)\n # end is end of slice interval (not including)\n for i in range(1, num_values):\n end_bound = self.index[i]\n start_bound = end_bound - offset_diff\n\n # left endpoint is closed\n if left_closed:\n start_bound -= Nano(1)\n\n # advance the start bound until we are\n # within the constraint\n start[i] = i\n for j in range(start[i - 1], i):\n start_diff = (self.index[j] - start_bound) * index_growth_sign\n if start_diff > zero:\n start[i] = j\n break\n\n # end bound is previous end\n # or current index\n end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign\n if end_diff == zero and not right_closed:\n end[i] = end[i - 1] + 1\n elif end_diff <= zero:\n end[i] = i + 1\n else:\n end[i] = end[i - 1]\n\n # right endpoint is open\n if not right_closed:\n end[i] -= 1\n\n return start, end\n\n\nclass ExpandingIndexer(BaseIndexer):\n """Calculate expanding window bounds, mimicking df.expanding()"""\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n return 
(\n np.zeros(num_values, dtype=np.int64),\n np.arange(1, num_values + 1, dtype=np.int64),\n )\n\n\nclass FixedForwardWindowIndexer(BaseIndexer):\n """\n Creates window boundaries for fixed-length windows that include the current row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)\n >>> df.rolling(window=indexer, min_periods=1).sum()\n B\n 0 1.0\n 1 3.0\n 2 2.0\n 3 4.0\n 4 4.0\n """\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n if center:\n raise ValueError("Forward-looking windows can't have center=True")\n if closed is not None:\n raise ValueError(\n "Forward-looking windows don't support setting the closed argument"\n )\n if step is None:\n step = 1\n\n start = np.arange(0, num_values, step, dtype="int64")\n end = start + self.window_size\n if self.window_size:\n end = np.clip(end, 0, num_values)\n\n return start, end\n\n\nclass GroupbyIndexer(BaseIndexer):\n """Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""\n\n def __init__(\n self,\n index_array: np.ndarray | None = None,\n window_size: int | BaseIndexer = 0,\n groupby_indices: dict | None = None,\n window_indexer: type[BaseIndexer] = BaseIndexer,\n indexer_kwargs: dict | None = None,\n **kwargs,\n ) -> None:\n """\n Parameters\n ----------\n index_array : np.ndarray or None\n np.ndarray of the index of the original object that we are performing\n a chained groupby operation over. 
This index has been pre-sorted relative to\n the groups\n window_size : int or BaseIndexer\n window size during the windowing operation\n groupby_indices : dict or None\n dict of {group label: [positional index of rows belonging to the group]}\n window_indexer : BaseIndexer\n BaseIndexer class determining the start and end bounds of each group\n indexer_kwargs : dict or None\n Custom kwargs to be passed to window_indexer\n **kwargs :\n keyword arguments that will be available when get_window_bounds is called\n """\n self.groupby_indices = groupby_indices or {}\n self.window_indexer = window_indexer\n self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {}\n super().__init__(\n index_array=index_array,\n window_size=self.indexer_kwargs.pop("window_size", window_size),\n **kwargs,\n )\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n # 1) For each group, get the indices that belong to the group\n # 2) Use the indices to calculate the start & end bounds of the window\n # 3) Append the window bounds in group order\n start_arrays = []\n end_arrays = []\n window_indices_start = 0\n for key, indices in self.groupby_indices.items():\n index_array: np.ndarray | None\n\n if self.index_array is not None:\n index_array = self.index_array.take(ensure_platform_int(indices))\n else:\n index_array = self.index_array\n indexer = self.window_indexer(\n index_array=index_array,\n window_size=self.window_size,\n **self.indexer_kwargs,\n )\n start, end = indexer.get_window_bounds(\n len(indices), min_periods, center, closed, step\n )\n start = start.astype(np.int64)\n end = end.astype(np.int64)\n assert len(start) == len(\n end\n ), "these should be equal in length from get_window_bounds"\n # Cannot use groupby_indices as they might not be monotonic with the object\n # 
we're rolling over\n window_indices = np.arange(\n window_indices_start, window_indices_start + len(indices)\n )\n window_indices_start += len(indices)\n # Extend as we'll be slicing window like [start, end)\n window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(\n np.int64, copy=False\n )\n start_arrays.append(window_indices.take(ensure_platform_int(start)))\n end_arrays.append(window_indices.take(ensure_platform_int(end)))\n if len(start_arrays) == 0:\n return np.array([], dtype=np.int64), np.array([], dtype=np.int64)\n start = np.concatenate(start_arrays)\n end = np.concatenate(end_arrays)\n return start, end\n\n\nclass ExponentialMovingWindowIndexer(BaseIndexer):\n """Calculate ewm window bounds (the entire window)"""\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: int | None = None,\n center: bool | None = None,\n closed: str | None = None,\n step: int | None = None,\n ) -> tuple[np.ndarray, np.ndarray]:\n return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64)\n | .venv\Lib\site-packages\pandas\core\indexers\objects.py | objects.py | Python | 14,737 | 0.95 | 0.125828 | 0.068354 | node-utils | 480 | 2024-11-17T04:52:24.389614 | Apache-2.0 | false | 43695f7a1bd1fa1d25fbd51ee345b4c8 |
"""\nLow-dependency indexing utilities.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nimport numpy as np\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.common import (\n is_array_like,\n is_bool_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCIndex,\n ABCSeries,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import AnyArrayLike\n\n from pandas.core.frame import DataFrame\n from pandas.core.indexes.base import Index\n\n# -----------------------------------------------------------\n# Indexer Identification\n\n\ndef is_valid_positional_slice(slc: slice) -> bool:\n """\n Check if a slice object can be interpreted as a positional indexer.\n\n Parameters\n ----------\n slc : slice\n\n Returns\n -------\n bool\n\n Notes\n -----\n A valid positional slice may also be interpreted as a label-based slice\n depending on the index being sliced.\n """\n return (\n lib.is_int_or_none(slc.start)\n and lib.is_int_or_none(slc.stop)\n and lib.is_int_or_none(slc.step)\n )\n\n\ndef is_list_like_indexer(key) -> bool:\n """\n Check if we have a list-like indexer that is *not* a NamedTuple.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n bool\n """\n # allow a list_like, but exclude NamedTuples which can be indexers\n return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)\n\n\ndef is_scalar_indexer(indexer, ndim: int) -> bool:\n """\n Return True if we are all scalar indexers.\n\n Parameters\n ----------\n indexer : object\n ndim : int\n Number of dimensions in the object being indexed.\n\n Returns\n -------\n bool\n """\n if ndim == 1 and is_integer(indexer):\n # GH37748: allow indexer to be an integer for Series\n return True\n if isinstance(indexer, tuple) and len(indexer) == ndim:\n return all(is_integer(x) for x in indexer)\n return False\n\n\ndef 
is_empty_indexer(indexer) -> bool:\n """\n Check if we have an empty indexer.\n\n Parameters\n ----------\n indexer : object\n\n Returns\n -------\n bool\n """\n if is_list_like(indexer) and not len(indexer):\n return True\n if not isinstance(indexer, tuple):\n indexer = (indexer,)\n return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)\n\n\n# -----------------------------------------------------------\n# Indexer Validation\n\n\ndef check_setitem_lengths(indexer, value, values) -> bool:\n """\n Validate that value and indexer are the same length.\n\n An special-case is allowed for when the indexer is a boolean array\n and the number of true values equals the length of ``value``. In\n this case, no exception is raised.\n\n Parameters\n ----------\n indexer : sequence\n Key for the setitem.\n value : array-like\n Value for the setitem.\n values : array-like\n Values being set into.\n\n Returns\n -------\n bool\n Whether this is an empty listlike setting which is a no-op.\n\n Raises\n ------\n ValueError\n When the indexer is an ndarray or list and the lengths don't match.\n """\n no_op = False\n\n if isinstance(indexer, (np.ndarray, list)):\n # We can ignore other listlikes because they are either\n # a) not necessarily 1-D indexers, e.g. tuple\n # b) boolean indexers e.g. 
BoolArray\n if is_list_like(value):\n if len(indexer) != len(value) and values.ndim == 1:\n # boolean with truth values == len of the value is ok too\n if isinstance(indexer, list):\n indexer = np.array(indexer)\n if not (\n isinstance(indexer, np.ndarray)\n and indexer.dtype == np.bool_\n and indexer.sum() == len(value)\n ):\n raise ValueError(\n "cannot set using a list-like indexer "\n "with a different length than the value"\n )\n if not len(indexer):\n no_op = True\n\n elif isinstance(indexer, slice):\n if is_list_like(value):\n if len(value) != length_of_indexer(indexer, values) and values.ndim == 1:\n # In case of two dimensional value is used row-wise and broadcasted\n raise ValueError(\n "cannot set using a slice indexer with a "\n "different length than the value"\n )\n if not len(value):\n no_op = True\n\n return no_op\n\n\ndef validate_indices(indices: np.ndarray, n: int) -> None:\n """\n Perform bounds-checking for an indexer.\n\n -1 is allowed for indicating missing values.\n\n Parameters\n ----------\n indices : ndarray\n n : int\n Length of the array being indexed.\n\n Raises\n ------\n ValueError\n\n Examples\n --------\n >>> validate_indices(np.array([1, 2]), 3) # OK\n\n >>> validate_indices(np.array([1, -2]), 3)\n Traceback (most recent call last):\n ...\n ValueError: negative dimensions are not allowed\n\n >>> validate_indices(np.array([1, 2, 3]), 3)\n Traceback (most recent call last):\n ...\n IndexError: indices are out-of-bounds\n\n >>> validate_indices(np.array([-1, -1]), 0) # OK\n\n >>> validate_indices(np.array([0, 1]), 0)\n Traceback (most recent call last):\n ...\n IndexError: indices are out-of-bounds\n """\n if len(indices):\n min_idx = indices.min()\n if min_idx < -1:\n msg = f"'indices' contains values less than allowed ({min_idx} < -1)"\n raise ValueError(msg)\n\n max_idx = indices.max()\n if max_idx >= n:\n raise IndexError("indices are out-of-bounds")\n\n\n# -----------------------------------------------------------\n# Indexer 
Conversion\n\n\ndef maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray:\n """\n Attempt to convert indices into valid, positive indices.\n\n If we have negative indices, translate to positive here.\n If we have indices that are out-of-bounds, raise an IndexError.\n\n Parameters\n ----------\n indices : array-like\n Array of indices that we are to convert.\n n : int\n Number of elements in the array that we are indexing.\n verify : bool, default True\n Check that all entries are between 0 and n - 1, inclusive.\n\n Returns\n -------\n array-like\n An array-like of positive indices that correspond to the ones\n that were passed in initially to this function.\n\n Raises\n ------\n IndexError\n One of the converted indices either exceeded the number of,\n elements (specified by `n`), or was still negative.\n """\n if isinstance(indices, list):\n indices = np.array(indices)\n if len(indices) == 0:\n # If `indices` is empty, np.array will return a float,\n # and will cause indexing errors.\n return np.empty(0, dtype=np.intp)\n\n mask = indices < 0\n if mask.any():\n indices = indices.copy()\n indices[mask] += n\n\n if verify:\n mask = (indices >= n) | (indices < 0)\n if mask.any():\n raise IndexError("indices are out-of-bounds")\n return indices\n\n\n# -----------------------------------------------------------\n# Unsorted\n\n\ndef length_of_indexer(indexer, target=None) -> int:\n """\n Return the expected length of target[indexer]\n\n Returns\n -------\n int\n """\n if target is not None and isinstance(indexer, slice):\n target_len = len(target)\n start = indexer.start\n stop = indexer.stop\n step = indexer.step\n if start is None:\n start = 0\n elif start < 0:\n start += target_len\n if stop is None or stop > target_len:\n stop = target_len\n elif stop < 0:\n stop += target_len\n if step is None:\n step = 1\n elif step < 0:\n start, stop = stop + 1, start + 1\n step = -step\n return (stop - start + step - 1) // step\n elif isinstance(indexer, 
(ABCSeries, ABCIndex, np.ndarray, list)):\n if isinstance(indexer, list):\n indexer = np.array(indexer)\n\n if indexer.dtype == bool:\n # GH#25774\n return indexer.sum()\n return len(indexer)\n elif isinstance(indexer, range):\n return (indexer.stop - indexer.start) // indexer.step\n elif not is_list_like_indexer(indexer):\n return 1\n raise AssertionError("cannot find the length of the indexer")\n\n\ndef disallow_ndim_indexing(result) -> None:\n """\n Helper function to disallow multi-dimensional indexing on 1D Series/Index.\n\n GH#27125 indexer like idx[:, None] expands dim, but we cannot do that\n and keep an index, so we used to return ndarray, which was deprecated\n in GH#30588.\n """\n if np.ndim(result) > 1:\n raise ValueError(\n "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer "\n "supported. Convert to a numpy array before indexing instead."\n )\n\n\ndef unpack_1tuple(tup):\n """\n If we have a length-1 tuple/list that contains a slice, unpack to just\n the slice.\n\n Notes\n -----\n The list case is deprecated.\n """\n if len(tup) == 1 and isinstance(tup[0], slice):\n # if we don't have a MultiIndex, we may still be able to handle\n # a 1-tuple. see test_1tuple_without_multiindex\n\n if isinstance(tup, list):\n # GH#31299\n raise ValueError(\n "Indexing with a single-item list containing a "\n "slice is not allowed. 
Pass a tuple instead.",\n )\n\n return tup[0]\n return tup\n\n\ndef check_key_length(columns: Index, key, value: DataFrame) -> None:\n """\n Checks if a key used as indexer has the same length as the columns it is\n associated with.\n\n Parameters\n ----------\n columns : Index The columns of the DataFrame to index.\n key : A list-like of keys to index with.\n value : DataFrame The value to set for the keys.\n\n Raises\n ------\n ValueError: If the length of key is not equal to the number of columns in value\n or if the number of columns referenced by key is not equal to number\n of columns.\n """\n if columns.is_unique:\n if len(value.columns) != len(key):\n raise ValueError("Columns must be same length as key")\n else:\n # Missing keys in columns are represented as -1\n if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns):\n raise ValueError("Columns must be same length as key")\n\n\ndef unpack_tuple_and_ellipses(item: tuple):\n """\n Possibly unpack arr[..., n] to arr[n]\n """\n if len(item) > 1:\n # Note: we are assuming this indexing is being done on a 1D arraylike\n if item[0] is Ellipsis:\n item = item[1:]\n elif item[-1] is Ellipsis:\n item = item[:-1]\n\n if len(item) > 1:\n raise IndexError("too many indices for array.")\n\n item = item[0]\n return item\n\n\n# -----------------------------------------------------------\n# Public indexer validation\n\n\ndef check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:\n """\n Check if `indexer` is a valid array indexer for `array`.\n\n For a boolean mask, `array` and `indexer` are checked to have the same\n length. The dtype is validated, and if it is an integer or boolean\n ExtensionArray, it is checked if there are missing values present, and\n it is converted to the appropriate numpy array. Other dtypes will raise\n an error.\n\n Non-array indexers (integer, slice, Ellipsis, tuples, ..) 
are passed\n through as is.\n\n Parameters\n ----------\n array : array-like\n The array that is being indexed (only used for the length).\n indexer : array-like or list-like\n The array-like that's used to index. List-like input that is not yet\n a numpy array or an ExtensionArray is converted to one. Other input\n types are passed through as is.\n\n Returns\n -------\n numpy.ndarray\n The validated indexer as a numpy array that can be used to index.\n\n Raises\n ------\n IndexError\n When the lengths don't match.\n ValueError\n When `indexer` cannot be converted to a numpy ndarray to index\n (e.g. presence of missing values).\n\n See Also\n --------\n api.types.is_bool_dtype : Check if `key` is of boolean dtype.\n\n Examples\n --------\n When checking a boolean mask, a boolean ndarray is returned when the\n arguments are all valid.\n\n >>> mask = pd.array([True, False])\n >>> arr = pd.array([1, 2])\n >>> pd.api.indexers.check_array_indexer(arr, mask)\n array([ True, False])\n\n An IndexError is raised when the lengths don't match.\n\n >>> mask = pd.array([True, False, True])\n >>> pd.api.indexers.check_array_indexer(arr, mask)\n Traceback (most recent call last):\n ...\n IndexError: Boolean index has wrong length: 3 instead of 2.\n\n NA values in a boolean array are treated as False.\n\n >>> mask = pd.array([True, pd.NA])\n >>> pd.api.indexers.check_array_indexer(arr, mask)\n array([ True, False])\n\n A numpy boolean mask will get passed through (if the length is correct):\n\n >>> mask = np.array([True, False])\n >>> pd.api.indexers.check_array_indexer(arr, mask)\n array([ True, False])\n\n Similarly for integer indexers, an integer ndarray is returned when it is\n a valid indexer, otherwise an error is (for integer indexers, a matching\n length is not required):\n\n >>> indexer = pd.array([0, 2], dtype="Int64")\n >>> arr = pd.array([1, 2, 3])\n >>> pd.api.indexers.check_array_indexer(arr, indexer)\n array([0, 2])\n\n >>> indexer = pd.array([0, pd.NA], 
dtype="Int64")\n >>> pd.api.indexers.check_array_indexer(arr, indexer)\n Traceback (most recent call last):\n ...\n ValueError: Cannot index with an integer indexer containing NA values\n\n For non-integer/boolean dtypes, an appropriate error is raised:\n\n >>> indexer = np.array([0., 2.], dtype="float64")\n >>> pd.api.indexers.check_array_indexer(arr, indexer)\n Traceback (most recent call last):\n ...\n IndexError: arrays used as indices must be of integer or boolean type\n """\n from pandas.core.construction import array as pd_array\n\n # whatever is not an array-like is returned as-is (possible valid array\n # indexers that are not array-like: integer, slice, Ellipsis, None)\n # In this context, tuples are not considered as array-like, as they have\n # a specific meaning in indexing (multi-dimensional indexing)\n if is_list_like(indexer):\n if isinstance(indexer, tuple):\n return indexer\n else:\n return indexer\n\n # convert list-likes to array\n if not is_array_like(indexer):\n indexer = pd_array(indexer)\n if len(indexer) == 0:\n # empty list is converted to float array by pd.array\n indexer = np.array([], dtype=np.intp)\n\n dtype = indexer.dtype\n if is_bool_dtype(dtype):\n if isinstance(dtype, ExtensionDtype):\n indexer = indexer.to_numpy(dtype=bool, na_value=False)\n else:\n indexer = np.asarray(indexer, dtype=bool)\n\n # GH26658\n if len(indexer) != len(array):\n raise IndexError(\n f"Boolean index has wrong length: "\n f"{len(indexer)} instead of {len(array)}"\n )\n elif is_integer_dtype(dtype):\n try:\n indexer = np.asarray(indexer, dtype=np.intp)\n except ValueError as err:\n raise ValueError(\n "Cannot index with an integer indexer containing NA values"\n ) from err\n else:\n raise IndexError("arrays used as indices must be of integer or boolean type")\n\n return indexer\n | .venv\Lib\site-packages\pandas\core\indexers\utils.py | utils.py | Python | 16,069 | 0.95 | 0.155515 | 0.071588 | node-utils | 988 | 2024-05-01T01:00:35.172076 | BSD-3-Clause | 
false | e481b237c768db2ec4c872c70ffe55fd |
from pandas.core.indexers.utils import (\n check_array_indexer,\n check_key_length,\n check_setitem_lengths,\n disallow_ndim_indexing,\n is_empty_indexer,\n is_list_like_indexer,\n is_scalar_indexer,\n is_valid_positional_slice,\n length_of_indexer,\n maybe_convert_indices,\n unpack_1tuple,\n unpack_tuple_and_ellipses,\n validate_indices,\n)\n\n__all__ = [\n "is_valid_positional_slice",\n "is_list_like_indexer",\n "is_scalar_indexer",\n "is_empty_indexer",\n "check_setitem_lengths",\n "validate_indices",\n "maybe_convert_indices",\n "length_of_indexer",\n "disallow_ndim_indexing",\n "unpack_1tuple",\n "check_key_length",\n "check_array_indexer",\n "unpack_tuple_and_ellipses",\n]\n | .venv\Lib\site-packages\pandas\core\indexers\__init__.py | __init__.py | Python | 736 | 0.85 | 0 | 0 | react-lib | 760 | 2024-12-17T20:55:05.729569 | MIT | false | e66dac619b60cb341545041152af3c49 |
\n\n | .venv\Lib\site-packages\pandas\core\indexers\__pycache__\objects.cpython-313.pyc | objects.cpython-313.pyc | Other | 16,160 | 0.95 | 0.037801 | 0.003623 | react-lib | 929 | 2024-07-18T20:12:32.145456 | MIT | false | e812657bb849b70d6e892110ca3bab1a |
\n\n | .venv\Lib\site-packages\pandas\core\indexers\__pycache__\utils.cpython-313.pyc | utils.cpython-313.pyc | Other | 17,030 | 0.95 | 0.070381 | 0.006944 | awesome-app | 658 | 2024-03-08T11:42:22.651627 | MIT | false | 7b75fc5c8f8f187e48332c6a091e7823 |
\n\n | .venv\Lib\site-packages\pandas\core\indexers\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 737 | 0.7 | 0 | 0 | node-utils | 36 | 2025-03-21T23:09:45.231355 | GPL-3.0 | false | 480d2acd9a5884cf49e7d1a25636cded |
"""\ndatetimelike delegation\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_integer_dtype,\n is_list_like,\n)\nfrom pandas.core.dtypes.dtypes import (\n ArrowDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\n\nfrom pandas.core.accessor import (\n PandasDelegate,\n delegate_names,\n)\nfrom pandas.core.arrays import (\n DatetimeArray,\n PeriodArray,\n TimedeltaArray,\n)\nfrom pandas.core.arrays.arrow.array import ArrowExtensionArray\nfrom pandas.core.base import (\n NoNewAttributesMixin,\n PandasObject,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex\n\nif TYPE_CHECKING:\n from pandas import (\n DataFrame,\n Series,\n )\n\n\nclass Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):\n _hidden_attrs = PandasObject._hidden_attrs | {\n "orig",\n "name",\n }\n\n def __init__(self, data: Series, orig) -> None:\n if not isinstance(data, ABCSeries):\n raise TypeError(\n f"cannot convert an object of type {type(data)} to a datetimelike index"\n )\n\n self._parent = data\n self.orig = orig\n self.name = getattr(data, "name", None)\n self._freeze()\n\n def _get_values(self):\n data = self._parent\n if lib.is_np_dtype(data.dtype, "M"):\n return DatetimeIndex(data, copy=False, name=self.name)\n\n elif isinstance(data.dtype, DatetimeTZDtype):\n return DatetimeIndex(data, copy=False, name=self.name)\n\n elif lib.is_np_dtype(data.dtype, "m"):\n return TimedeltaIndex(data, copy=False, name=self.name)\n\n elif isinstance(data.dtype, PeriodDtype):\n return PeriodArray(data, copy=False)\n\n raise TypeError(\n f"cannot convert an object of type {type(data)} to a datetimelike index"\n )\n\n def _delegate_property_get(self, name: 
str):\n from pandas import Series\n\n values = self._get_values()\n\n result = getattr(values, name)\n\n # maybe need to upcast (ints)\n if isinstance(result, np.ndarray):\n if is_integer_dtype(result):\n result = result.astype("int64")\n elif not is_list_like(result):\n return result\n\n result = np.asarray(result)\n\n if self.orig is not None:\n index = self.orig.index\n else:\n index = self._parent.index\n # return the result as a Series\n result = Series(result, index=index, name=self.name).__finalize__(self._parent)\n\n # setting this object will show a SettingWithCopyWarning/Error\n result._is_copy = (\n "modifications to a property of a datetimelike "\n "object are not supported and are discarded. "\n "Change values on the original."\n )\n\n return result\n\n def _delegate_property_set(self, name: str, value, *args, **kwargs):\n raise ValueError(\n "modifications to a property of a datetimelike object are not supported. "\n "Change values on the original."\n )\n\n def _delegate_method(self, name: str, *args, **kwargs):\n from pandas import Series\n\n values = self._get_values()\n\n method = getattr(values, name)\n result = method(*args, **kwargs)\n\n if not is_list_like(result):\n return result\n\n result = Series(result, index=self._parent.index, name=self.name).__finalize__(\n self._parent\n )\n\n # setting this object will show a SettingWithCopyWarning/Error\n result._is_copy = (\n "modifications to a method of a datetimelike "\n "object are not supported and are discarded. 
"\n "Change values on the original."\n )\n\n return result\n\n\n@delegate_names(\n delegate=ArrowExtensionArray,\n accessors=TimedeltaArray._datetimelike_ops,\n typ="property",\n accessor_mapping=lambda x: f"_dt_{x}",\n raise_on_missing=False,\n)\n@delegate_names(\n delegate=ArrowExtensionArray,\n accessors=TimedeltaArray._datetimelike_methods,\n typ="method",\n accessor_mapping=lambda x: f"_dt_{x}",\n raise_on_missing=False,\n)\n@delegate_names(\n delegate=ArrowExtensionArray,\n accessors=DatetimeArray._datetimelike_ops,\n typ="property",\n accessor_mapping=lambda x: f"_dt_{x}",\n raise_on_missing=False,\n)\n@delegate_names(\n delegate=ArrowExtensionArray,\n accessors=DatetimeArray._datetimelike_methods,\n typ="method",\n accessor_mapping=lambda x: f"_dt_{x}",\n raise_on_missing=False,\n)\nclass ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin):\n def __init__(self, data: Series, orig) -> None:\n if not isinstance(data, ABCSeries):\n raise TypeError(\n f"cannot convert an object of type {type(data)} to a datetimelike index"\n )\n\n self._parent = data\n self._orig = orig\n self._freeze()\n\n def _delegate_property_get(self, name: str):\n if not hasattr(self._parent.array, f"_dt_{name}"):\n raise NotImplementedError(\n f"dt.{name} is not supported for {self._parent.dtype}"\n )\n result = getattr(self._parent.array, f"_dt_{name}")\n\n if not is_list_like(result):\n return result\n\n if self._orig is not None:\n index = self._orig.index\n else:\n index = self._parent.index\n # return the result as a Series, which is by definition a copy\n result = type(self._parent)(\n result, index=index, name=self._parent.name\n ).__finalize__(self._parent)\n\n return result\n\n def _delegate_method(self, name: str, *args, **kwargs):\n if not hasattr(self._parent.array, f"_dt_{name}"):\n raise NotImplementedError(\n f"dt.{name} is not supported for {self._parent.dtype}"\n )\n\n result = getattr(self._parent.array, f"_dt_{name}")(*args, **kwargs)\n\n if 
self._orig is not None:\n index = self._orig.index\n else:\n index = self._parent.index\n # return the result as a Series, which is by definition a copy\n result = type(self._parent)(\n result, index=index, name=self._parent.name\n ).__finalize__(self._parent)\n\n return result\n\n def to_pytimedelta(self):\n return cast(ArrowExtensionArray, self._parent.array)._dt_to_pytimedelta()\n\n def to_pydatetime(self):\n # GH#20306\n warnings.warn(\n f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "\n "in a future version this will return a Series containing python "\n "datetime objects instead of an ndarray. To retain the old behavior, "\n "call `np.array` on the result",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime()\n\n def isocalendar(self) -> DataFrame:\n from pandas import DataFrame\n\n result = (\n cast(ArrowExtensionArray, self._parent.array)\n ._dt_isocalendar()\n ._pa_array.combine_chunks()\n )\n iso_calendar_df = DataFrame(\n {\n col: type(self._parent.array)(result.field(i)) # type: ignore[call-arg]\n for i, col in enumerate(["year", "week", "day"])\n }\n )\n return iso_calendar_df\n\n @property\n def components(self) -> DataFrame:\n from pandas import DataFrame\n\n components_df = DataFrame(\n {\n col: getattr(self._parent.array, f"_dt_{col}")\n for col in [\n "days",\n "hours",\n "minutes",\n "seconds",\n "milliseconds",\n "microseconds",\n "nanoseconds",\n ]\n }\n )\n return components_df\n\n\n@delegate_names(\n delegate=DatetimeArray,\n accessors=DatetimeArray._datetimelike_ops + ["unit"],\n typ="property",\n)\n@delegate_names(\n delegate=DatetimeArray,\n accessors=DatetimeArray._datetimelike_methods + ["as_unit"],\n typ="method",\n)\nclass DatetimeProperties(Properties):\n """\n Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n >>> seconds_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="s"))\n >>> 
seconds_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:00:01\n 2 2000-01-01 00:00:02\n dtype: datetime64[ns]\n >>> seconds_series.dt.second\n 0 0\n 1 1\n 2 2\n dtype: int32\n\n >>> hours_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="h"))\n >>> hours_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 01:00:00\n 2 2000-01-01 02:00:00\n dtype: datetime64[ns]\n >>> hours_series.dt.hour\n 0 0\n 1 1\n 2 2\n dtype: int32\n\n >>> quarters_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="QE"))\n >>> quarters_series\n 0 2000-03-31\n 1 2000-06-30\n 2 2000-09-30\n dtype: datetime64[ns]\n >>> quarters_series.dt.quarter\n 0 1\n 1 2\n 2 3\n dtype: int32\n\n Returns a Series indexed like the original Series.\n Raises TypeError if the Series does not contain datetimelike values.\n """\n\n def to_pydatetime(self) -> np.ndarray:\n """\n Return the data as an array of :class:`datetime.datetime` objects.\n\n .. deprecated:: 2.1.0\n\n The current behavior of dt.to_pydatetime is deprecated.\n In a future version this will return a Series containing python\n datetime objects instead of a ndarray.\n\n Timezone information is retained if present.\n\n .. warning::\n\n Python's datetime uses microsecond resolution, which is lower than\n pandas (nanosecond). 
The values are truncated.\n\n Returns\n -------\n numpy.ndarray\n Object dtype array containing native Python datetime objects.\n\n See Also\n --------\n datetime.datetime : Standard library value for a datetime.\n\n Examples\n --------\n >>> s = pd.Series(pd.date_range('20180310', periods=2))\n >>> s\n 0 2018-03-10\n 1 2018-03-11\n dtype: datetime64[ns]\n\n >>> s.dt.to_pydatetime()\n array([datetime.datetime(2018, 3, 10, 0, 0),\n datetime.datetime(2018, 3, 11, 0, 0)], dtype=object)\n\n pandas' nanosecond precision is truncated to microseconds.\n\n >>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))\n >>> s\n 0 2018-03-10 00:00:00.000000000\n 1 2018-03-10 00:00:00.000000001\n dtype: datetime64[ns]\n\n >>> s.dt.to_pydatetime()\n array([datetime.datetime(2018, 3, 10, 0, 0),\n datetime.datetime(2018, 3, 10, 0, 0)], dtype=object)\n """\n # GH#20306\n warnings.warn(\n f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "\n "in a future version this will return a Series containing python "\n "datetime objects instead of an ndarray. 
To retain the old behavior, "\n "call `np.array` on the result",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return self._get_values().to_pydatetime()\n\n @property\n def freq(self):\n return self._get_values().inferred_freq\n\n def isocalendar(self) -> DataFrame:\n """\n Calculate year, week, and day according to the ISO 8601 standard.\n\n Returns\n -------\n DataFrame\n With columns year, week and day.\n\n See Also\n --------\n Timestamp.isocalendar : Function return a 3-tuple containing ISO year,\n week number, and weekday for the given Timestamp object.\n datetime.date.isocalendar : Return a named tuple object with\n three components: year, week and weekday.\n\n Examples\n --------\n >>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))\n >>> ser.dt.isocalendar()\n year week day\n 0 2009 53 5\n 1 <NA> <NA> <NA>\n >>> ser.dt.isocalendar().week\n 0 53\n 1 <NA>\n Name: week, dtype: UInt32\n """\n return self._get_values().isocalendar().set_index(self._parent.index)\n\n\n@delegate_names(\n delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"\n)\n@delegate_names(\n delegate=TimedeltaArray,\n accessors=TimedeltaArray._datetimelike_methods,\n typ="method",\n)\nclass TimedeltaProperties(Properties):\n """\n Accessor object for datetimelike properties of the Series values.\n\n Returns a Series indexed like the original Series.\n Raises TypeError if the Series does not contain datetimelike values.\n\n Examples\n --------\n >>> seconds_series = pd.Series(\n ... pd.timedelta_range(start="1 second", periods=3, freq="s")\n ... )\n >>> seconds_series\n 0 0 days 00:00:01\n 1 0 days 00:00:02\n 2 0 days 00:00:03\n dtype: timedelta64[ns]\n >>> seconds_series.dt.seconds\n 0 1\n 1 2\n 2 3\n dtype: int32\n """\n\n def to_pytimedelta(self) -> np.ndarray:\n """\n Return an array of native :class:`datetime.timedelta` objects.\n\n Python's standard `datetime` library uses a different representation\n timedelta's. 
This method converts a Series of pandas Timedeltas\n to `datetime.timedelta` format with the same length as the original\n Series.\n\n Returns\n -------\n numpy.ndarray\n Array of 1D containing data with `datetime.timedelta` type.\n\n See Also\n --------\n datetime.timedelta : A duration expressing the difference\n between two date, time, or datetime.\n\n Examples\n --------\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))\n >>> s\n 0 0 days\n 1 1 days\n 2 2 days\n 3 3 days\n 4 4 days\n dtype: timedelta64[ns]\n\n >>> s.dt.to_pytimedelta()\n array([datetime.timedelta(0), datetime.timedelta(days=1),\n datetime.timedelta(days=2), datetime.timedelta(days=3),\n datetime.timedelta(days=4)], dtype=object)\n """\n return self._get_values().to_pytimedelta()\n\n @property\n def components(self):\n """\n Return a Dataframe of the components of the Timedeltas.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))\n >>> s\n 0 0 days 00:00:00\n 1 0 days 00:00:01\n 2 0 days 00:00:02\n 3 0 days 00:00:03\n 4 0 days 00:00:04\n dtype: timedelta64[ns]\n >>> s.dt.components\n days hours minutes seconds milliseconds microseconds nanoseconds\n 0 0 0 0 0 0 0 0\n 1 0 0 0 1 0 0 0\n 2 0 0 0 2 0 0 0\n 3 0 0 0 3 0 0 0\n 4 0 0 0 4 0 0 0\n """\n return (\n self._get_values()\n .components.set_index(self._parent.index)\n .__finalize__(self._parent)\n )\n\n @property\n def freq(self):\n return self._get_values().inferred_freq\n\n\n@delegate_names(\n delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ="property"\n)\n@delegate_names(\n delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ="method"\n)\nclass PeriodProperties(Properties):\n """\n Accessor object for datetimelike properties of the Series values.\n\n Returns a Series indexed like the original Series.\n Raises TypeError if the Series does not contain datetimelike values.\n\n Examples\n --------\n >>> seconds_series = pd.Series(\n 
... pd.period_range(\n ... start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s"\n ... )\n ... )\n >>> seconds_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:00:01\n 2 2000-01-01 00:00:02\n 3 2000-01-01 00:00:03\n dtype: period[s]\n >>> seconds_series.dt.second\n 0 0\n 1 1\n 2 2\n 3 3\n dtype: int64\n\n >>> hours_series = pd.Series(\n ... pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h")\n ... )\n >>> hours_series\n 0 2000-01-01 00:00\n 1 2000-01-01 01:00\n 2 2000-01-01 02:00\n 3 2000-01-01 03:00\n dtype: period[h]\n >>> hours_series.dt.hour\n 0 0\n 1 1\n 2 2\n 3 3\n dtype: int64\n\n >>> quarters_series = pd.Series(\n ... pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC")\n ... )\n >>> quarters_series\n 0 2000Q1\n 1 2000Q2\n 2 2000Q3\n 3 2000Q4\n dtype: period[Q-DEC]\n >>> quarters_series.dt.quarter\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n """\n\n\nclass CombinedDatetimelikeProperties(\n DatetimeProperties, TimedeltaProperties, PeriodProperties\n):\n def __new__(cls, data: Series): # pyright: ignore[reportInconsistentConstructor]\n # CombinedDatetimelikeProperties isn't really instantiated. Instead\n # we need to choose which parent (datetime or timedelta) is\n # appropriate. 
Since we're checking the dtypes anyway, we'll just\n # do all the validation here.\n\n if not isinstance(data, ABCSeries):\n raise TypeError(\n f"cannot convert an object of type {type(data)} to a datetimelike index"\n )\n\n orig = data if isinstance(data.dtype, CategoricalDtype) else None\n if orig is not None:\n data = data._constructor(\n orig.array,\n name=orig.name,\n copy=False,\n dtype=orig._values.categories.dtype,\n index=orig.index,\n )\n\n if isinstance(data.dtype, ArrowDtype) and data.dtype.kind in "Mm":\n return ArrowTemporalProperties(data, orig)\n if lib.is_np_dtype(data.dtype, "M"):\n return DatetimeProperties(data, orig)\n elif isinstance(data.dtype, DatetimeTZDtype):\n return DatetimeProperties(data, orig)\n elif lib.is_np_dtype(data.dtype, "m"):\n return TimedeltaProperties(data, orig)\n elif isinstance(data.dtype, PeriodDtype):\n return PeriodProperties(data, orig)\n\n raise AttributeError("Can only use .dt accessor with datetimelike values")\n | .venv\Lib\site-packages\pandas\core\indexes\accessors.py | accessors.py | Python | 19,152 | 0.95 | 0.090202 | 0.022018 | react-lib | 288 | 2025-07-05T04:58:12.476246 | MIT | false | dfc82e1b9675838ee06fa9a8aebbba05 |
from __future__ import annotations\n\nimport textwrap\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n lib,\n)\nfrom pandas.errors import InvalidIndexError\n\nfrom pandas.core.dtypes.cast import find_common_type\n\nfrom pandas.core.algorithms import safe_sort\nfrom pandas.core.indexes.base import (\n Index,\n _new_Index,\n ensure_index,\n ensure_index_from_sequences,\n get_unanimous_names,\n)\nfrom pandas.core.indexes.category import CategoricalIndex\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.interval import IntervalIndex\nfrom pandas.core.indexes.multi import MultiIndex\nfrom pandas.core.indexes.period import PeriodIndex\nfrom pandas.core.indexes.range import RangeIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex\n\nif TYPE_CHECKING:\n from pandas._typing import Axis\n_sort_msg = textwrap.dedent(\n """\\nSorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n"""\n)\n\n\n__all__ = [\n "Index",\n "MultiIndex",\n "CategoricalIndex",\n "IntervalIndex",\n "RangeIndex",\n "InvalidIndexError",\n "TimedeltaIndex",\n "PeriodIndex",\n "DatetimeIndex",\n "_new_Index",\n "NaT",\n "ensure_index",\n "ensure_index_from_sequences",\n "get_objs_combined_axis",\n "union_indexes",\n "get_unanimous_names",\n "all_indexes_same",\n "default_index",\n "safe_sort_index",\n]\n\n\ndef get_objs_combined_axis(\n objs,\n intersect: bool = False,\n axis: Axis = 0,\n sort: bool = True,\n copy: bool = False,\n) -> Index:\n """\n Extract combined index: return intersection or union (depending on the\n value of "intersect") of indexes on given axis, or None if all objects\n lack indexes (e.g. 
they are numpy arrays).\n\n Parameters\n ----------\n objs : list\n Series or DataFrame objects, may be mix of the two.\n intersect : bool, default False\n If True, calculate the intersection between indexes. Otherwise,\n calculate the union.\n axis : {0 or 'index', 1 or 'outer'}, default 0\n The axis to extract indexes from.\n sort : bool, default True\n Whether the result index should come out sorted or not.\n copy : bool, default False\n If True, return a copy of the combined index.\n\n Returns\n -------\n Index\n """\n obs_idxes = [obj._get_axis(axis) for obj in objs]\n return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)\n\n\ndef _get_distinct_objs(objs: list[Index]) -> list[Index]:\n """\n Return a list with distinct elements of "objs" (different ids).\n Preserves order.\n """\n ids: set[int] = set()\n res = []\n for obj in objs:\n if id(obj) not in ids:\n ids.add(id(obj))\n res.append(obj)\n return res\n\n\ndef _get_combined_index(\n indexes: list[Index],\n intersect: bool = False,\n sort: bool = False,\n copy: bool = False,\n) -> Index:\n """\n Return the union or intersection of indexes.\n\n Parameters\n ----------\n indexes : list of Index or list objects\n When intersect=True, do not accept list of lists.\n intersect : bool, default False\n If True, calculate the intersection between indexes. 
Otherwise,\n calculate the union.\n sort : bool, default False\n Whether the result index should come out sorted or not.\n copy : bool, default False\n If True, return a copy of the combined index.\n\n Returns\n -------\n Index\n """\n # TODO: handle index names!\n indexes = _get_distinct_objs(indexes)\n if len(indexes) == 0:\n index = Index([])\n elif len(indexes) == 1:\n index = indexes[0]\n elif intersect:\n index = indexes[0]\n for other in indexes[1:]:\n index = index.intersection(other)\n else:\n index = union_indexes(indexes, sort=False)\n index = ensure_index(index)\n\n if sort:\n index = safe_sort_index(index)\n # GH 29879\n if copy:\n index = index.copy()\n\n return index\n\n\ndef safe_sort_index(index: Index) -> Index:\n """\n Returns the sorted index\n\n We keep the dtypes and the name attributes.\n\n Parameters\n ----------\n index : an Index\n\n Returns\n -------\n Index\n """\n if index.is_monotonic_increasing:\n return index\n\n try:\n array_sorted = safe_sort(index)\n except TypeError:\n pass\n else:\n if isinstance(array_sorted, Index):\n return array_sorted\n\n array_sorted = cast(np.ndarray, array_sorted)\n if isinstance(index, MultiIndex):\n index = MultiIndex.from_tuples(array_sorted, names=index.names)\n else:\n index = Index(array_sorted, name=index.name, dtype=index.dtype)\n\n return index\n\n\ndef union_indexes(indexes, sort: bool | None = True) -> Index:\n """\n Return the union of indexes.\n\n The behavior of sort and names is not consistent.\n\n Parameters\n ----------\n indexes : list of Index or list objects\n sort : bool, default True\n Whether the result index should come out sorted or not.\n\n Returns\n -------\n Index\n """\n if len(indexes) == 0:\n raise AssertionError("Must have at least 1 Index to union")\n if len(indexes) == 1:\n result = indexes[0]\n if isinstance(result, list):\n if not sort:\n result = Index(result)\n else:\n result = Index(sorted(result))\n return result\n\n indexes, kind = _sanitize_and_check(indexes)\n\n 
def _unique_indices(inds, dtype) -> Index:\n """\n Concatenate indices and remove duplicates.\n\n Parameters\n ----------\n inds : list of Index or list objects\n dtype : dtype to set for the resulting Index\n\n Returns\n -------\n Index\n """\n if all(isinstance(ind, Index) for ind in inds):\n inds = [ind.astype(dtype, copy=False) for ind in inds]\n result = inds[0].unique()\n other = inds[1].append(inds[2:])\n diff = other[result.get_indexer_for(other) == -1]\n if len(diff):\n result = result.append(diff.unique())\n if sort:\n result = result.sort_values()\n return result\n\n def conv(i):\n if isinstance(i, Index):\n i = i.tolist()\n return i\n\n return Index(\n lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort),\n dtype=dtype,\n )\n\n def _find_common_index_dtype(inds):\n """\n Finds a common type for the indexes to pass through to resulting index.\n\n Parameters\n ----------\n inds: list of Index or list objects\n\n Returns\n -------\n The common type or None if no indexes were given\n """\n dtypes = [idx.dtype for idx in indexes if isinstance(idx, Index)]\n if dtypes:\n dtype = find_common_type(dtypes)\n else:\n dtype = None\n\n return dtype\n\n if kind == "special":\n result = indexes[0]\n\n dtis = [x for x in indexes if isinstance(x, DatetimeIndex)]\n dti_tzs = [x for x in dtis if x.tz is not None]\n if len(dti_tzs) not in [0, len(dtis)]:\n # TODO: this behavior is not tested (so may not be desired),\n # but is kept in order to keep behavior the same when\n # deprecating union_many\n # test_frame_from_dict_with_mixed_indexes\n raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")\n\n if len(dtis) == len(indexes):\n sort = True\n result = indexes[0]\n\n elif len(dtis) > 1:\n # If we have mixed timezones, our casting behavior may depend on\n # the order of indexes, which we don't want.\n sort = False\n\n # TODO: what about Categorical[dt64]?\n # test_frame_from_dict_with_mixed_indexes\n indexes = [x.astype(object, copy=False) for x 
in indexes]\n result = indexes[0]\n\n for other in indexes[1:]:\n result = result.union(other, sort=None if sort else False)\n return result\n\n elif kind == "array":\n dtype = _find_common_index_dtype(indexes)\n index = indexes[0]\n if not all(index.equals(other) for other in indexes[1:]):\n index = _unique_indices(indexes, dtype)\n\n name = get_unanimous_names(*indexes)[0]\n if name != index.name:\n index = index.rename(name)\n return index\n else: # kind='list'\n dtype = _find_common_index_dtype(indexes)\n return _unique_indices(indexes, dtype)\n\n\ndef _sanitize_and_check(indexes):\n """\n Verify the type of indexes and convert lists to Index.\n\n Cases:\n\n - [list, list, ...]: Return ([list, list, ...], 'list')\n - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])\n Lists are sorted and converted to Index.\n - [Index, Index, ...]: Return ([Index, Index, ...], TYPE)\n TYPE = 'special' if at least one special type, 'array' otherwise.\n\n Parameters\n ----------\n indexes : list of Index or list objects\n\n Returns\n -------\n sanitized_indexes : list of Index or list objects\n type : {'list', 'array', 'special'}\n """\n kinds = list({type(index) for index in indexes})\n\n if list in kinds:\n if len(kinds) > 1:\n indexes = [\n Index(list(x)) if not isinstance(x, Index) else x for x in indexes\n ]\n kinds.remove(list)\n else:\n return indexes, "list"\n\n if len(kinds) > 1 or Index not in kinds:\n return indexes, "special"\n else:\n return indexes, "array"\n\n\ndef all_indexes_same(indexes) -> bool:\n """\n Determine if all indexes contain the same elements.\n\n Parameters\n ----------\n indexes : iterable of Index objects\n\n Returns\n -------\n bool\n True if all indexes contain the same elements, False otherwise.\n """\n itr = iter(indexes)\n first = next(itr)\n return all(first.equals(index) for index in itr)\n\n\ndef default_index(n: int) -> RangeIndex:\n rng = range(n)\n return RangeIndex._simple_new(rng, name=None)\n | 
.venv\Lib\site-packages\pandas\core\indexes\api.py | api.py | Python | 10,426 | 0.95 | 0.164948 | 0.031153 | react-lib | 534 | 2024-01-20T23:37:37.846134 | GPL-3.0 | false | 74596049054e42ababd06189489eb268 |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._libs import index as libindex\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\n\nfrom pandas.core.dtypes.common import is_scalar\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.dtypes.missing import (\n is_valid_na_for_dtype,\n isna,\n)\n\nfrom pandas.core.arrays.categorical import (\n Categorical,\n contains,\n)\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexes.base import (\n Index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.extension import (\n NDArrayBackedExtensionIndex,\n inherit_names,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import (\n Dtype,\n DtypeObj,\n Self,\n npt,\n )\n\n\n@inherit_names(\n [\n "argsort",\n "tolist",\n "codes",\n "categories",\n "ordered",\n "_reverse_indexer",\n "searchsorted",\n "min",\n "max",\n ],\n Categorical,\n)\n@inherit_names(\n [\n "rename_categories",\n "reorder_categories",\n "add_categories",\n "remove_categories",\n "remove_unused_categories",\n "set_categories",\n "as_ordered",\n "as_unordered",\n ],\n Categorical,\n wrap=True,\n)\nclass CategoricalIndex(NDArrayBackedExtensionIndex):\n """\n Index based on an underlying :class:`Categorical`.\n\n CategoricalIndex, like Categorical, can only take on a limited,\n and usually fixed, number of possible values (`categories`). Also,\n like Categorical, it might have an order, but numerical operations\n (additions, divisions, ...) are not possible.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n The values of the categorical. If `categories` are given, values not in\n `categories` will be replaced with NaN.\n categories : index-like, optional\n The categories for the categorical. 
Items need to be unique.\n If the categories are not given here (and also not in `dtype`), they\n will be inferred from the `data`.\n ordered : bool, optional\n Whether or not this categorical is treated as an ordered\n categorical. If not given here or in `dtype`, the resulting\n categorical will be unordered.\n dtype : CategoricalDtype or "category", optional\n If :class:`CategoricalDtype`, cannot be used together with\n `categories` or `ordered`.\n copy : bool, default False\n Make a copy of input ndarray.\n name : object, optional\n Name to be stored in the index.\n\n Attributes\n ----------\n codes\n categories\n ordered\n\n Methods\n -------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n as_ordered\n as_unordered\n map\n\n Raises\n ------\n ValueError\n If the categories do not validate.\n TypeError\n If an explicit ``ordered=True`` is given but no `categories` and the\n `values` are not sortable.\n\n See Also\n --------\n Index : The base pandas Index type.\n Categorical : A categorical array.\n CategoricalDtype : Type for categorical data.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__\n for more.\n\n Examples\n --------\n >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])\n CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],\n categories=['a', 'b', 'c'], ordered=False, dtype='category')\n\n ``CategoricalIndex`` can also be instantiated from a ``Categorical``:\n\n >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])\n >>> pd.CategoricalIndex(c)\n CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],\n categories=['a', 'b', 'c'], ordered=False, dtype='category')\n\n Ordered ``CategoricalIndex`` can have a min and max value.\n\n >>> ci = pd.CategoricalIndex(\n ... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]\n ... 
)\n >>> ci\n CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],\n categories=['c', 'b', 'a'], ordered=True, dtype='category')\n >>> ci.min()\n 'c'\n """\n\n _typ = "categoricalindex"\n _data_cls = Categorical\n\n @property\n def _can_hold_strings(self):\n return self.categories._can_hold_strings\n\n @cache_readonly\n def _should_fallback_to_positional(self) -> bool:\n return self.categories._should_fallback_to_positional\n\n codes: np.ndarray\n categories: Index\n ordered: bool | None\n _data: Categorical\n _values: Categorical\n\n @property\n def _engine_type(self) -> type[libindex.IndexEngine]:\n # self.codes can have dtype int8, int16, int32 or int64, so we need\n # to return the corresponding engine type (libindex.Int8Engine, etc.).\n return {\n np.int8: libindex.Int8Engine,\n np.int16: libindex.Int16Engine,\n np.int32: libindex.Int32Engine,\n np.int64: libindex.Int64Engine,\n }[self.codes.dtype.type]\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data=None,\n categories=None,\n ordered=None,\n dtype: Dtype | None = None,\n copy: bool = False,\n name: Hashable | None = None,\n ) -> Self:\n name = maybe_extract_name(name, data, cls)\n\n if is_scalar(data):\n # GH#38944 include None here, which pre-2.0 subbed in []\n cls._raise_scalar_data_error(data)\n\n data = Categorical(\n data, categories=categories, ordered=ordered, dtype=dtype, copy=copy\n )\n\n return cls._simple_new(data, name=name)\n\n # --------------------------------------------------------------------\n\n def _is_dtype_compat(self, other: Index) -> Categorical:\n """\n *this is an internal non-public method*\n\n provide a comparison between the dtype of self and other (coercing if\n needed)\n\n Parameters\n ----------\n other : Index\n\n Returns\n -------\n Categorical\n\n Raises\n ------\n TypeError if the dtypes are not compatible\n """\n if isinstance(other.dtype, CategoricalDtype):\n cat = extract_array(other)\n cat = 
cast(Categorical, cat)\n if not cat._categories_match_up_to_permutation(self._values):\n raise TypeError(\n "categories must match existing categories when appending"\n )\n\n elif other._is_multi:\n # preempt raising NotImplementedError in isna call\n raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")\n else:\n values = other\n\n cat = Categorical(other, dtype=self.dtype)\n other = CategoricalIndex(cat)\n if not other.isin(values).all():\n raise TypeError(\n "cannot append a non-category item to a CategoricalIndex"\n )\n cat = other._values\n\n if not ((cat == values) | (isna(cat) & isna(values))).all():\n # GH#37667 see test_equals_non_category\n raise TypeError(\n "categories must match existing categories when appending"\n )\n\n return cat\n\n def equals(self, other: object) -> bool:\n """\n Determine if two CategoricalIndex objects contain the same elements.\n\n Returns\n -------\n bool\n ``True`` if two :class:`pandas.CategoricalIndex` objects have equal\n elements, ``False`` otherwise.\n\n Examples\n --------\n >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])\n >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']))\n >>> ci.equals(ci2)\n True\n\n The order of elements matters.\n\n >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c'])\n >>> ci.equals(ci3)\n False\n\n The orderedness also matters.\n\n >>> ci4 = ci.as_ordered()\n >>> ci.equals(ci4)\n False\n\n The categories matter, but the order of the categories matters only when\n ``ordered=True``.\n\n >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd'])\n >>> ci.equals(ci5)\n False\n\n >>> ci6 = ci.set_categories(['b', 'c', 'a'])\n >>> ci.equals(ci6)\n True\n >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],\n ... 
ordered=True)\n >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a'])\n >>> ci_ordered.equals(ci2_ordered)\n False\n """\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n try:\n other = self._is_dtype_compat(other)\n except (TypeError, ValueError):\n return False\n\n return self._data.equals(other)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n @property\n def _formatter_func(self):\n return self.categories._formatter_func\n\n def _format_attrs(self):\n """\n Return a list of tuples of the (attr,formatted_value)\n """\n attrs: list[tuple[str, str | int | bool | None]]\n\n attrs = [\n (\n "categories",\n f"[{', '.join(self._data._repr_categories())}]",\n ),\n ("ordered", self.ordered),\n ]\n extra = super()._format_attrs()\n return attrs + extra\n\n # --------------------------------------------------------------------\n\n @property\n def inferred_type(self) -> str:\n return "categorical"\n\n @doc(Index.__contains__)\n def __contains__(self, key: Any) -> bool:\n # if key is a NaN, check if any NaN is in self.\n if is_valid_na_for_dtype(key, self.categories.dtype):\n return self.hasnans\n\n return contains(self, key, container=self._engine)\n\n def reindex(\n self, target, method=None, level=None, limit: int | None = None, tolerance=None\n ) -> tuple[Index, npt.NDArray[np.intp] | None]:\n """\n Create index with target's values (move/add/delete values as necessary)\n\n Returns\n -------\n new_index : pd.Index\n Resulting index\n indexer : np.ndarray[np.intp] or None\n Indices of output values in original index\n\n """\n if method is not None:\n raise NotImplementedError(\n "argument method is not implemented for CategoricalIndex.reindex"\n )\n if level is not None:\n raise NotImplementedError(\n "argument level is not implemented for CategoricalIndex.reindex"\n )\n if limit is not None:\n raise NotImplementedError(\n "argument limit is not implemented for 
CategoricalIndex.reindex"\n )\n return super().reindex(target)\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def _maybe_cast_indexer(self, key) -> int:\n # GH#41933: we have to do this instead of self._data._validate_scalar\n # because this will correctly get partial-indexing on Interval categories\n try:\n return self._data._unbox_scalar(key)\n except KeyError:\n if is_valid_na_for_dtype(key, self.categories.dtype):\n return -1\n raise\n\n def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:\n if isinstance(values, CategoricalIndex):\n values = values._data\n if isinstance(values, Categorical):\n # Indexing on codes is more efficient if categories are the same,\n # so we can apply some optimizations based on the degree of\n # dtype-matching.\n cat = self._data._encode_with_my_categories(values)\n codes = cat._codes\n else:\n codes = self.categories.get_indexer(values)\n codes = codes.astype(self.codes.dtype, copy=False)\n cat = self._data._from_backing_data(codes)\n return type(self)._simple_new(cat)\n\n # --------------------------------------------------------------------\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n return self.categories._is_comparable_dtype(dtype)\n\n def map(self, mapper, na_action: Literal["ignore"] | None = None):\n """\n Map values using input an input mapping or function.\n\n Maps the values (their categories, not the codes) of the index to new\n categories. If the mapping correspondence is one-to-one the result is a\n :class:`~pandas.CategoricalIndex` which has the same order property as\n the original, otherwise an :class:`~pandas.Index` is returned.\n\n If a `dict` or :class:`~pandas.Series` is used any unmapped category is\n mapped to `NaN`. 
Note that if this happens an :class:`~pandas.Index`\n will be returned.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n\n Returns\n -------\n pandas.CategoricalIndex or pandas.Index\n Mapped index.\n\n See Also\n --------\n Index.map : Apply a mapping correspondence on an\n :class:`~pandas.Index`.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n Series.apply : Apply more complex functions on a\n :class:`~pandas.Series`.\n\n Examples\n --------\n >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])\n >>> idx\n CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],\n ordered=False, dtype='category')\n >>> idx.map(lambda x: x.upper())\n CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],\n ordered=False, dtype='category')\n >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})\n CategoricalIndex(['first', 'second', 'third'], categories=['first',\n 'second', 'third'], ordered=False, dtype='category')\n\n If the mapping is one-to-one the ordering of the categories is\n preserved:\n\n >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)\n >>> idx\n CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],\n ordered=True, dtype='category')\n >>> idx.map({'a': 3, 'b': 2, 'c': 1})\n CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,\n dtype='category')\n\n If the mapping is not one-to-one an :class:`~pandas.Index` is returned:\n\n >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})\n Index(['first', 'second', 'first'], dtype='object')\n\n If a `dict` is used, all unmapped categories are mapped to `NaN` and\n the result is an :class:`~pandas.Index`:\n\n >>> idx.map({'a': 'first', 'b': 'second'})\n Index(['first', 'second', nan], dtype='object')\n """\n mapped = self._values.map(mapper, na_action=na_action)\n return Index(mapped, name=self.name)\n\n def _concat(self, to_concat: list[Index], name: Hashable) -> Index:\n # if calling index is category, don't 
check dtype of others\n try:\n cat = Categorical._concat_same_type(\n [self._is_dtype_compat(c) for c in to_concat]\n )\n except TypeError:\n # not all to_concat elements are among our categories (or NA)\n\n res = concat_compat([x._values for x in to_concat])\n return Index(res, name=name)\n else:\n return type(self)._simple_new(cat, name=name)\n | .venv\Lib\site-packages\pandas\core\indexes\category.py | category.py | Python | 16,128 | 0.95 | 0.128655 | 0.053738 | react-lib | 866 | 2025-04-12T05:52:12.489284 | GPL-3.0 | false | 690f0f2e5a7ef418f7b54ad324b7760d |
"""\nBase and utility classes for tseries type pandas objects.\n"""\nfrom __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n cast,\n final,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import using_copy_on_write\n\nfrom pandas._libs import (\n NaT,\n Timedelta,\n lib,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n Resolution,\n Tick,\n parsing,\n to_offset,\n)\nfrom pandas._libs.tslibs.dtypes import freq_to_period_freqstr\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import (\n InvalidIndexError,\n NullFrequencyError,\n)\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_integer,\n is_list_like,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nfrom pandas.core.arrays import (\n DatetimeArray,\n ExtensionArray,\n PeriodArray,\n TimedeltaArray,\n)\nfrom pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n)\nfrom pandas.core.indexes.extension import NDArrayBackedExtensionIndex\nfrom pandas.core.indexes.range import RangeIndex\nfrom pandas.core.tools.timedeltas import to_timedelta\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n from datetime import datetime\n\n from pandas._typing import (\n Axis,\n Self,\n npt,\n )\n\n from pandas import CategoricalIndex\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n\nclass DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC):\n """\n Common ops mixin to support a unified interface datetimelike Index.\n """\n\n _can_hold_strings = False\n _data: DatetimeArray | TimedeltaArray | PeriodArray\n\n 
@doc(DatetimeLikeArrayMixin.mean)\n def mean(self, *, skipna: bool = True, axis: int | None = 0):\n return self._data.mean(skipna=skipna, axis=axis)\n\n @property\n def freq(self) -> BaseOffset | None:\n return self._data.freq\n\n @freq.setter\n def freq(self, value) -> None:\n # error: Property "freq" defined in "PeriodArray" is read-only [misc]\n self._data.freq = value # type: ignore[misc]\n\n @property\n def asi8(self) -> npt.NDArray[np.int64]:\n return self._data.asi8\n\n @property\n @doc(DatetimeLikeArrayMixin.freqstr)\n def freqstr(self) -> str:\n from pandas import PeriodIndex\n\n if self._data.freqstr is not None and isinstance(\n self._data, (PeriodArray, PeriodIndex)\n ):\n freq = freq_to_period_freqstr(self._data.freq.n, self._data.freq.name)\n return freq\n else:\n return self._data.freqstr # type: ignore[return-value]\n\n @cache_readonly\n @abstractmethod\n def _resolution_obj(self) -> Resolution:\n ...\n\n @cache_readonly\n @doc(DatetimeLikeArrayMixin.resolution)\n def resolution(self) -> str:\n return self._data.resolution\n\n # ------------------------------------------------------------------------\n\n @cache_readonly\n def hasnans(self) -> bool:\n return self._data._hasna\n\n def equals(self, other: Any) -> bool:\n """\n Determines if two Index objects contain the same elements.\n """\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n elif other.dtype.kind in "iufc":\n return False\n elif not isinstance(other, type(self)):\n should_try = False\n inferable = self._data._infer_matches\n if other.dtype == object:\n should_try = other.inferred_type in inferable\n elif isinstance(other.dtype, CategoricalDtype):\n other = cast("CategoricalIndex", other)\n should_try = other.categories.inferred_type in inferable\n\n if should_try:\n try:\n other = type(self)(other)\n except (ValueError, TypeError, OverflowError):\n # e.g.\n # ValueError -> cannot parse str entry, or OutOfBoundsDatetime\n # TypeError -> trying to 
convert IntervalIndex to DatetimeIndex\n # OverflowError -> Index([very_large_timedeltas])\n return False\n\n if self.dtype != other.dtype:\n # have different timezone\n return False\n\n return np.array_equal(self.asi8, other.asi8)\n\n @Appender(Index.__contains__.__doc__)\n def __contains__(self, key: Any) -> bool:\n hash(key)\n try:\n self.get_loc(key)\n except (KeyError, TypeError, ValueError, InvalidIndexError):\n return False\n return True\n\n def _convert_tolerance(self, tolerance, target):\n tolerance = np.asarray(to_timedelta(tolerance).to_numpy())\n return super()._convert_tolerance(tolerance, target)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n _default_na_rep = "NaT"\n\n def format(\n self,\n name: bool = False,\n formatter: Callable | None = None,\n na_rep: str = "NaT",\n date_format: str | None = None,\n ) -> list[str]:\n """\n Render a string representation of the Index.\n """\n warnings.warn(\n # GH#55413\n f"{type(self).__name__}.format is deprecated and will be removed "\n "in a future version. 
Convert using index.astype(str) or "\n "index.map(formatter) instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n header = []\n if name:\n header.append(\n ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))\n if self.name is not None\n else ""\n )\n\n if formatter is not None:\n return header + list(self.map(formatter))\n\n return self._format_with_header(\n header=header, na_rep=na_rep, date_format=date_format\n )\n\n def _format_with_header(\n self, *, header: list[str], na_rep: str, date_format: str | None = None\n ) -> list[str]:\n # TODO: not reached in tests 2023-10-11\n # matches base class except for whitespace padding and date_format\n return header + list(\n self._get_values_for_csv(na_rep=na_rep, date_format=date_format)\n )\n\n @property\n def _formatter_func(self):\n return self._data._formatter()\n\n def _format_attrs(self):\n """\n Return a list of tuples of the (attr,formatted_value).\n """\n attrs = super()._format_attrs()\n for attrib in self._attributes:\n # iterating over _attributes prevents us from doing this for PeriodIndex\n if attrib == "freq":\n freq = self.freqstr\n if freq is not None:\n freq = repr(freq) # e.g. D -> 'D'\n attrs.append(("freq", freq))\n return attrs\n\n @Appender(Index._summary.__doc__)\n def _summary(self, name=None) -> str:\n result = super()._summary(name=name)\n if self.freq:\n result += f"\nFreq: {self.freqstr}"\n\n return result\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n @final\n def _can_partial_date_slice(self, reso: Resolution) -> bool:\n # e.g. 
test_getitem_setitem_periodindex\n # History of conversation GH#3452, GH#3931, GH#2369, GH#14826\n return reso > self._resolution_obj\n # NB: for DTI/PI, not TDI\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed):\n raise NotImplementedError\n\n def _parse_with_reso(self, label: str):\n # overridden by TimedeltaIndex\n try:\n if self.freq is None or hasattr(self.freq, "rule_code"):\n freq = self.freq\n except NotImplementedError:\n freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))\n\n freqstr: str | None\n if freq is not None and not isinstance(freq, str):\n freqstr = freq.rule_code\n else:\n freqstr = freq\n\n if isinstance(label, np.str_):\n # GH#45580\n label = str(label)\n\n parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr)\n reso = Resolution.from_attrname(reso_str)\n return parsed, reso\n\n def _get_string_slice(self, key: str):\n # overridden by TimedeltaIndex\n parsed, reso = self._parse_with_reso(key)\n try:\n return self._partial_date_slice(reso, parsed)\n except KeyError as err:\n raise KeyError(key) from err\n\n @final\n def _partial_date_slice(\n self,\n reso: Resolution,\n parsed: datetime,\n ) -> slice | npt.NDArray[np.intp]:\n """\n Parameters\n ----------\n reso : Resolution\n parsed : datetime\n\n Returns\n -------\n slice or ndarray[intp]\n """\n if not self._can_partial_date_slice(reso):\n raise ValueError\n\n t1, t2 = self._parsed_string_to_bounds(reso, parsed)\n vals = self._data._ndarray\n unbox = self._data._unbox\n\n if self.is_monotonic_increasing:\n if len(self) and (\n (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])\n ):\n # we are out of range\n raise KeyError\n\n # TODO: does this depend on being monotonic _increasing_?\n\n # a monotonic (sorted) series can be sliced\n left = vals.searchsorted(unbox(t1), side="left")\n right = vals.searchsorted(unbox(t2), side="right")\n return slice(left, right)\n\n else:\n lhs_mask = vals >= unbox(t1)\n rhs_mask = vals <= 
unbox(t2)\n\n # try to find the dates\n return (lhs_mask & rhs_mask).nonzero()[0]\n\n def _maybe_cast_slice_bound(self, label, side: str):\n """\n If label is a string, cast it to scalar type according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n """\n if isinstance(label, str):\n try:\n parsed, reso = self._parse_with_reso(label)\n except ValueError as err:\n # DTI -> parsing.DateParseError\n # TDI -> 'unit abbreviation w/o a number'\n # PI -> string cannot be parsed as datetime-like\n self._raise_invalid_indexer("slice", label, err)\n\n lower, upper = self._parsed_string_to_bounds(reso, parsed)\n return lower if side == "left" else upper\n elif not isinstance(label, self._data._recognized_scalars):\n self._raise_invalid_indexer("slice", label)\n\n return label\n\n # --------------------------------------------------------------------\n # Arithmetic Methods\n\n def shift(self, periods: int = 1, freq=None) -> Self:\n """\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or string, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.DatetimeIndex\n Shifted index.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n PeriodIndex.shift : Shift values of PeriodIndex.\n """\n raise NotImplementedError\n\n # --------------------------------------------------------------------\n\n @doc(Index._maybe_cast_listlike_indexer)\n def 
_maybe_cast_listlike_indexer(self, keyarr):\n try:\n res = self._data._validate_listlike(keyarr, allow_object=True)\n except (ValueError, TypeError):\n if not isinstance(keyarr, ExtensionArray):\n # e.g. we don't want to cast DTA to ndarray[object]\n res = com.asarray_tuplesafe(keyarr)\n # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray\n else:\n res = keyarr\n return Index(res, dtype=res.dtype)\n\n\nclass DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC):\n """\n Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,\n but not PeriodIndex\n """\n\n _data: DatetimeArray | TimedeltaArray\n _comparables = ["name", "freq"]\n _attributes = ["name", "freq"]\n\n # Compat for frequency inference, see GH#23789\n _is_monotonic_increasing = Index.is_monotonic_increasing\n _is_monotonic_decreasing = Index.is_monotonic_decreasing\n _is_unique = Index.is_unique\n\n @property\n def unit(self) -> str:\n return self._data.unit\n\n def as_unit(self, unit: str) -> Self:\n """\n Convert to a dtype with the given unit resolution.\n\n Parameters\n ----------\n unit : {'s', 'ms', 'us', 'ns'}\n\n Returns\n -------\n same type as self\n\n Examples\n --------\n For :class:`pandas.DatetimeIndex`:\n\n >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006'])\n >>> idx\n DatetimeIndex(['2020-01-02 01:02:03.004005006'],\n dtype='datetime64[ns]', freq=None)\n >>> idx.as_unit('s')\n DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)\n\n For :class:`pandas.TimedeltaIndex`:\n\n >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])\n >>> tdelta_idx\n TimedeltaIndex(['1 days 00:03:00.000002042'],\n dtype='timedelta64[ns]', freq=None)\n >>> tdelta_idx.as_unit('s')\n TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)\n """\n arr = self._data.as_unit(unit)\n return type(self)._simple_new(arr, name=self.name)\n\n def _with_freq(self, freq):\n arr = self._data._with_freq(freq)\n return type(self)._simple_new(arr, 
name=self._name)\n\n @property\n def values(self) -> np.ndarray:\n # NB: For Datetime64TZ this is lossy\n data = self._data._ndarray\n if using_copy_on_write():\n data = data.view()\n data.flags.writeable = False\n return data\n\n @doc(DatetimeIndexOpsMixin.shift)\n def shift(self, periods: int = 1, freq=None) -> Self:\n if freq is not None and freq != self.freq:\n if isinstance(freq, str):\n freq = to_offset(freq)\n offset = periods * freq\n return self + offset\n\n if periods == 0 or len(self) == 0:\n # GH#14811 empty case\n return self.copy()\n\n if self.freq is None:\n raise NullFrequencyError("Cannot shift with no freq")\n\n start = self[0] + periods * self.freq\n end = self[-1] + periods * self.freq\n\n # Note: in the DatetimeTZ case, _generate_range will infer the\n # appropriate timezone from `start` and `end`, so tz does not need\n # to be passed explicitly.\n result = self._data._generate_range(\n start=start, end=end, periods=None, freq=self.freq, unit=self.unit\n )\n return type(self)._simple_new(result, name=self.name)\n\n @cache_readonly\n @doc(DatetimeLikeArrayMixin.inferred_freq)\n def inferred_freq(self) -> str | None:\n return self._data.inferred_freq\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n @cache_readonly\n def _as_range_index(self) -> RangeIndex:\n # Convert our i8 representations to RangeIndex\n # Caller is responsible for checking isinstance(self.freq, Tick)\n freq = cast(Tick, self.freq)\n tick = Timedelta(freq).as_unit("ns")._value\n rng = range(self[0]._value, self[-1]._value + tick, tick)\n return RangeIndex(rng)\n\n def _can_range_setop(self, other) -> bool:\n return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)\n\n def _wrap_range_setop(self, other, res_i8) -> Self:\n new_freq = None\n if not len(res_i8):\n # RangeIndex defaults to step=1, which we don't want.\n new_freq = self.freq\n elif isinstance(res_i8, RangeIndex):\n new_freq = 
to_offset(Timedelta(res_i8.step))\n\n # TODO(GH#41493): we cannot just do\n # type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)\n # because test_setops_preserve_freq fails with _validate_frequency raising.\n # This raising is incorrect, as 'on_freq' is incorrect. This will\n # be fixed by GH#41493\n res_values = res_i8.values.view(self._data._ndarray.dtype)\n result = type(self._data)._simple_new(\n # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has\n # incompatible type "Union[dtype[Any], ExtensionDtype]"; expected\n # "Union[dtype[datetime64], DatetimeTZDtype]"\n res_values,\n dtype=self.dtype, # type: ignore[arg-type]\n freq=new_freq, # type: ignore[arg-type]\n )\n return cast("Self", self._wrap_setop_result(other, result))\n\n def _range_intersect(self, other, sort) -> Self:\n # Dispatch to RangeIndex intersection logic.\n left = self._as_range_index\n right = other._as_range_index\n res_i8 = left.intersection(right, sort=sort)\n return self._wrap_range_setop(other, res_i8)\n\n def _range_union(self, other, sort) -> Self:\n # Dispatch to RangeIndex union logic.\n left = self._as_range_index\n right = other._as_range_index\n res_i8 = left.union(right, sort=sort)\n return self._wrap_range_setop(other, res_i8)\n\n def _intersection(self, other: Index, sort: bool = False) -> Index:\n """\n intersection specialized to the case with matching dtypes and both non-empty.\n """\n other = cast("DatetimeTimedeltaMixin", other)\n\n if self._can_range_setop(other):\n return self._range_intersect(other, sort=sort)\n\n if not self._can_fast_intersect(other):\n result = Index._intersection(self, other, sort=sort)\n # We need to invalidate the freq because Index._intersection\n # uses _shallow_copy on a view of self._data, which will preserve\n # self.freq if we're not careful.\n # At this point we should have result.dtype == self.dtype\n # and type(result) is type(self._data)\n result = self._wrap_setop_result(other, result)\n return 
result._with_freq(None)._with_freq("infer")\n\n else:\n return self._fast_intersect(other, sort)\n\n def _fast_intersect(self, other, sort):\n # to make our life easier, "sort" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n # after sorting, the intersection always starts with the right index\n # and ends with the index of which the last elements is smallest\n end = min(left[-1], right[-1])\n start = right[0]\n\n if end < start:\n result = self[:0]\n else:\n lslice = slice(*left.slice_locs(start, end))\n result = left._values[lslice]\n\n return result\n\n def _can_fast_intersect(self, other: Self) -> bool:\n # Note: we only get here with len(self) > 0 and len(other) > 0\n if self.freq is None:\n return False\n\n elif other.freq != self.freq:\n return False\n\n elif not self.is_monotonic_increasing:\n # Because freq is not None, we must then be monotonic decreasing\n return False\n\n # this along with matching freqs ensure that we "line up",\n # so intersection will preserve freq\n # Note we are assuming away Ticks, as those go through _range_intersect\n # GH#42104\n return self.freq.n == 1\n\n def _can_fast_union(self, other: Self) -> bool:\n # Assumes that type(self) == type(other), as per the annotation\n # The ability to fast_union also implies that `freq` should be\n # retained on union.\n freq = self.freq\n\n if freq is None or freq != other.freq:\n return False\n\n if not self.is_monotonic_increasing:\n # Because freq is not None, we must then be monotonic decreasing\n # TODO: do union on the reversed indexes?\n return False\n\n if len(self) == 0 or len(other) == 0:\n # only reached via union_many\n return True\n\n # to make our life easier, "sort" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n right_start = right[0]\n left_end = left[-1]\n\n # Only need to "adjoin", not overlap\n return (right_start == left_end + freq) or right_start in 
left\n\n def _fast_union(self, other: Self, sort=None) -> Self:\n # Caller is responsible for ensuring self and other are non-empty\n\n # to make our life easier, "sort" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n elif sort is False:\n # TDIs are not in the "correct" order and we don't want\n # to sort but want to remove overlaps\n left, right = self, other\n left_start = left[0]\n loc = right.searchsorted(left_start, side="left")\n right_chunk = right._values[:loc]\n dates = concat_compat((left._values, right_chunk))\n result = type(self)._simple_new(dates, name=self.name)\n return result\n else:\n left, right = other, self\n\n left_end = left[-1]\n right_end = right[-1]\n\n # concatenate\n if left_end < right_end:\n loc = right.searchsorted(left_end, side="right")\n right_chunk = right._values[loc:]\n dates = concat_compat([left._values, right_chunk])\n # The can_fast_union check ensures that the result.freq\n # should match self.freq\n assert isinstance(dates, type(self._data))\n # error: Item "ExtensionArray" of "ExtensionArray |\n # ndarray[Any, Any]" has no attribute "_freq"\n assert dates._freq == self.freq # type: ignore[union-attr]\n result = type(self)._simple_new(dates)\n return result\n else:\n return left\n\n def _union(self, other, sort):\n # We are called by `union`, which is responsible for this validation\n assert isinstance(other, type(self))\n assert self.dtype == other.dtype\n\n if self._can_range_setop(other):\n return self._range_union(other, sort=sort)\n\n if self._can_fast_union(other):\n result = self._fast_union(other, sort=sort)\n # in the case with sort=None, the _can_fast_union check ensures\n # that result.freq == self.freq\n return result\n else:\n return super()._union(other, sort)._with_freq("infer")\n\n # --------------------------------------------------------------------\n # Join Methods\n\n def _get_join_freq(self, other):\n """\n Get the freq to attach to the result of a join operation.\n """\n freq = 
None\n if self._can_fast_union(other):\n freq = self.freq\n return freq\n\n def _wrap_joined_index(\n self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp]\n ):\n assert other.dtype == self.dtype, (other.dtype, self.dtype)\n result = super()._wrap_joined_index(joined, other, lidx, ridx)\n result._data._freq = self._get_join_freq(other)\n return result\n\n def _get_engine_target(self) -> np.ndarray:\n # engine methods and libjoin methods need dt64/td64 values cast to i8\n return self._data._ndarray.view("i8")\n\n def _from_join_target(self, result: np.ndarray):\n # view e.g. i8 back to M8[ns]\n result = result.view(self._data._ndarray.dtype)\n return self._data._from_backing_data(result)\n\n # --------------------------------------------------------------------\n # List-like Methods\n\n def _get_delete_freq(self, loc: int | slice | Sequence[int]):\n """\n Find the `freq` for self.delete(loc).\n """\n freq = None\n if self.freq is not None:\n if is_integer(loc):\n if loc in (0, -len(self), -1, len(self) - 1):\n freq = self.freq\n else:\n if is_list_like(loc):\n # error: Incompatible types in assignment (expression has\n # type "Union[slice, ndarray]", variable has type\n # "Union[int, slice, Sequence[int]]")\n loc = lib.maybe_indices_to_slice( # type: ignore[assignment]\n np.asarray(loc, dtype=np.intp), len(self)\n )\n if isinstance(loc, slice) and loc.step in (1, None):\n if loc.start in (0, None) or loc.stop in (len(self), None):\n freq = self.freq\n return freq\n\n def _get_insert_freq(self, loc: int, item):\n """\n Find the `freq` for self.insert(loc, item).\n """\n value = self._data._validate_scalar(item)\n item = self._data._box_func(value)\n\n freq = None\n if self.freq is not None:\n # freq can be preserved on edge cases\n if self.size:\n if item is NaT:\n pass\n elif loc in (0, -len(self)) and item + self.freq == self[0]:\n freq = self.freq\n elif (loc == len(self)) and item - self.freq == self[-1]:\n freq = self.freq\n else:\n # 
Adding a single item to an empty index may preserve freq\n if isinstance(self.freq, Tick):\n # all TimedeltaIndex cases go through here; is_on_offset\n # would raise TypeError\n freq = self.freq\n elif self.freq.is_on_offset(item):\n freq = self.freq\n return freq\n\n @doc(NDArrayBackedExtensionIndex.delete)\n def delete(self, loc) -> Self:\n result = super().delete(loc)\n result._data._freq = self._get_delete_freq(loc)\n return result\n\n @doc(NDArrayBackedExtensionIndex.insert)\n def insert(self, loc: int, item):\n result = super().insert(loc, item)\n if isinstance(result, type(self)):\n # i.e. parent class method did not cast\n result._data._freq = self._get_insert_freq(loc, item)\n return result\n\n # --------------------------------------------------------------------\n # NDArray-Like Methods\n\n @Appender(_index_shared_docs["take"] % _index_doc_kwargs)\n def take(\n self,\n indices,\n axis: Axis = 0,\n allow_fill: bool = True,\n fill_value=None,\n **kwargs,\n ) -> Self:\n nv.validate_take((), kwargs)\n indices = np.asarray(indices, dtype=np.intp)\n\n result = NDArrayBackedExtensionIndex.take(\n self, indices, axis, allow_fill, fill_value, **kwargs\n )\n\n maybe_slice = lib.maybe_indices_to_slice(indices, len(self))\n if isinstance(maybe_slice, slice):\n freq = self._data._get_getitem_freq(maybe_slice)\n result._data._freq = freq\n return result\n | .venv\Lib\site-packages\pandas\core\indexes\datetimelike.py | datetimelike.py | Python | 28,377 | 0.95 | 0.158956 | 0.148725 | node-utils | 230 | 2024-01-22T03:27:09.453814 | MIT | false | 942fb08bd57ac7bd28f871e1ec7cf3be |
from __future__ import annotations\n\nimport datetime as dt\nimport operator\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\nimport pytz\n\nfrom pandas._libs import (\n NaT,\n Period,\n Timestamp,\n index as libindex,\n lib,\n)\nfrom pandas._libs.tslibs import (\n Resolution,\n Tick,\n Timedelta,\n periods_per_day,\n timezones,\n to_offset,\n)\nfrom pandas._libs.tslibs.offsets import prefix_mapping\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import is_scalar\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import is_valid_na_for_dtype\n\nfrom pandas.core.arrays.datetimes import (\n DatetimeArray,\n tz_to_dtype,\n)\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import (\n Index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin\nfrom pandas.core.indexes.extension import inherit_names\nfrom pandas.core.tools.times import to_time\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import (\n Dtype,\n DtypeObj,\n Frequency,\n IntervalClosedType,\n Self,\n TimeAmbiguous,\n TimeNonexistent,\n npt,\n )\n\n from pandas.core.api import (\n DataFrame,\n PeriodIndex,\n )\n\nfrom pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR\n\n\ndef _new_DatetimeIndex(cls, d):\n """\n This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__\n """\n if "data" in d and not isinstance(d["data"], DatetimeIndex):\n # Avoid need to verify integrity by calling simple_new directly\n data = d.pop("data")\n if not isinstance(data, DatetimeArray):\n # For backward compat with older pickles, we may need to construct\n # a DatetimeArray to adapt to the newer _simple_new signature\n tz = d.pop("tz")\n freq = d.pop("freq")\n dta = 
DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)\n else:\n dta = data\n for key in ["tz", "freq"]:\n # These are already stored in our DatetimeArray; if they are\n # also in the pickle and don't match, we have a problem.\n if key in d:\n assert d[key] == getattr(dta, key)\n d.pop(key)\n result = cls._simple_new(dta, **d)\n else:\n with warnings.catch_warnings():\n # TODO: If we knew what was going in to **d, we might be able to\n # go through _simple_new instead\n warnings.simplefilter("ignore")\n result = cls.__new__(cls, **d)\n\n return result\n\n\n@inherit_names(\n DatetimeArray._field_ops\n + [\n method\n for method in DatetimeArray._datetimelike_methods\n if method not in ("tz_localize", "tz_convert", "strftime")\n ],\n DatetimeArray,\n wrap=True,\n)\n@inherit_names(["is_normalized"], DatetimeArray, cache=True)\n@inherit_names(\n [\n "tz",\n "tzinfo",\n "dtype",\n "to_pydatetime",\n "date",\n "time",\n "timetz",\n "std",\n ]\n + DatetimeArray._bool_ops,\n DatetimeArray,\n)\nclass DatetimeIndex(DatetimeTimedeltaMixin):\n """\n Immutable ndarray-like of datetime64 data.\n\n Represented internally as int64, and which can be boxed to Timestamp objects\n that are subclasses of datetime and carry metadata.\n\n .. versionchanged:: 2.0.0\n The various numeric date/time attributes (:attr:`~DatetimeIndex.day`,\n :attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype\n ``int32``. Previously they had dtype ``int64``.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n Datetime-like data to construct index with.\n freq : str or pandas offset object, optional\n One of pandas date offset strings or corresponding objects. 
The string\n 'infer' can be passed in order to set the frequency of the index as the\n inferred frequency upon creation.\n tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str\n Set the Timezone of the data.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n\n .. deprecated:: 2.1.0\n\n closed : {'left', 'right'}, optional\n Set whether to include `start` and `end` that are on the\n boundary. The default includes boundary points on either end.\n\n .. deprecated:: 2.1.0\n\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from 03:00\n DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC\n and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter\n dictates how ambiguous times should be handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for ambiguous\n times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous times.\n dayfirst : bool, default False\n If True, parse dates in `data` with the day first order.\n yearfirst : bool, default False\n If True parse dates in `data` with the year first order.\n dtype : numpy.dtype or DatetimeTZDtype or str, default None\n Note that the only NumPy dtype allowed is `datetime64[ns]`.\n copy : bool, default False\n Make a copy of input ndarray.\n name : label, default None\n Name to be stored in the index.\n\n Attributes\n ----------\n year\n month\n day\n hour\n minute\n second\n microsecond\n nanosecond\n date\n time\n timetz\n dayofyear\n day_of_year\n dayofweek\n day_of_week\n weekday\n quarter\n tz\n freq\n freqstr\n is_month_start\n is_month_end\n is_quarter_start\n 
is_quarter_end\n is_year_start\n is_year_end\n is_leap_year\n inferred_freq\n\n Methods\n -------\n normalize\n strftime\n snap\n tz_convert\n tz_localize\n round\n floor\n ceil\n to_period\n to_pydatetime\n to_series\n to_frame\n month_name\n day_name\n mean\n std\n\n See Also\n --------\n Index : The base pandas Index type.\n TimedeltaIndex : Index of timedelta64 data.\n PeriodIndex : Index of Period data.\n to_datetime : Convert argument to datetime.\n date_range : Create a fixed-frequency DatetimeIndex.\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])\n >>> idx\n DatetimeIndex(['2020-01-01 10:00:00+00:00', '2020-02-01 11:00:00+00:00'],\n dtype='datetime64[ns, UTC]', freq=None)\n """\n\n _typ = "datetimeindex"\n\n _data_cls = DatetimeArray\n _supports_partial_string_indexing = True\n\n @property\n def _engine_type(self) -> type[libindex.DatetimeEngine]:\n return libindex.DatetimeEngine\n\n _data: DatetimeArray\n _values: DatetimeArray\n tz: dt.tzinfo | None\n\n # --------------------------------------------------------------------\n # methods that dispatch to DatetimeArray and wrap result\n\n @doc(DatetimeArray.strftime)\n def strftime(self, date_format) -> Index:\n arr = self._data.strftime(date_format)\n return Index(arr, name=self.name, dtype=arr.dtype)\n\n @doc(DatetimeArray.tz_convert)\n def tz_convert(self, tz) -> Self:\n arr = self._data.tz_convert(tz)\n return type(self)._simple_new(arr, name=self.name, refs=self._references)\n\n @doc(DatetimeArray.tz_localize)\n def tz_localize(\n self,\n tz,\n ambiguous: TimeAmbiguous = "raise",\n nonexistent: TimeNonexistent = "raise",\n ) -> Self:\n arr = self._data.tz_localize(tz, ambiguous, nonexistent)\n return type(self)._simple_new(arr, name=self.name)\n\n 
@doc(DatetimeArray.to_period)
    def to_period(self, freq=None) -> PeriodIndex:
        # Local import, presumably to avoid a circular import at module
        # load time — TODO confirm against pandas.core.indexes.api.
        from pandas.core.indexes.api import PeriodIndex

        # Delegate the conversion to the backing DatetimeArray and re-wrap
        # the resulting PeriodArray as a PeriodIndex with the same name.
        arr = self._data.to_period(freq)
        return PeriodIndex._simple_new(arr, name=self.name)

    @doc(DatetimeArray.to_julian_date)
    def to_julian_date(self) -> Index:
        # Delegate to the backing array; result is a plain numeric Index.
        arr = self._data.to_julian_date()
        return Index._simple_new(arr, name=self.name)

    @doc(DatetimeArray.isocalendar)
    def isocalendar(self) -> DataFrame:
        # The array-level isocalendar returns a DataFrame; re-attach this
        # index so rows line up with the original timestamps.
        df = self._data.isocalendar()
        return df.set_index(self)

    @cache_readonly
    def _resolution_obj(self) -> Resolution:
        # Forward the backing array's resolution; cached once per index.
        return self._data._resolution_obj

    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        data=None,
        freq: Frequency | lib.NoDefault = lib.no_default,
        tz=lib.no_default,
        normalize: bool | lib.NoDefault = lib.no_default,
        closed=lib.no_default,
        ambiguous: TimeAmbiguous = "raise",
        dayfirst: bool = False,
        yearfirst: bool = False,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable | None = None,
    ) -> Self:
        # Deprecated keywords: warn (but still accept) per GH#52628.
        if closed is not lib.no_default:
            # GH#52628
            warnings.warn(
                f"The 'closed' keyword in {cls.__name__} construction is "
                "deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        if normalize is not lib.no_default:
            # GH#52628
            warnings.warn(
                f"The 'normalize' keyword in {cls.__name__} construction is "
                "deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        # Reject scalar input up front; array-like data is required.
        if is_scalar(data):
            cls._raise_scalar_data_error(data)

        # - Cases checked above all return/raise before reaching here - #

        name = maybe_extract_name(name, data, cls)

        # Fastpath: already a DatetimeArray and no keyword would alter it,
        # so we can skip _from_sequence_not_strict entirely.
        if (
            isinstance(data, DatetimeArray)
            and freq is lib.no_default
            and tz is lib.no_default
            and dtype is None
        ):
            # fastpath, similar logic in TimedeltaIndex.__new__;
            # Note in this particular case we retain non-nano.
            if copy:
                data = 
data.copy()
            return cls._simple_new(data, name=name)

        # General path: parse/convert arbitrary input into a DatetimeArray,
        # honoring tz/freq/dtype and the dayfirst/yearfirst parsing hints.
        dtarr = DatetimeArray._from_sequence_not_strict(
            data,
            dtype=dtype,
            copy=copy,
            tz=tz,
            freq=freq,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
        )
        # When the input was not copied and is itself an Index/Series,
        # carry over its reference tracking (NOTE(review): appears to be
        # Copy-on-Write bookkeeping — confirm against _references usage).
        refs = None
        if not copy and isinstance(data, (Index, ABCSeries)):
            refs = data._references

        subarr = cls._simple_new(dtarr, name=name, refs=refs)
        return subarr

    # --------------------------------------------------------------------

    @cache_readonly
    def _is_dates_only(self) -> bool:
        """
        Return a boolean if we are only dates (and don't have a timezone)

        Returns
        -------
        bool
        """
        # A Tick freq that is not a whole number of days implies intra-day
        # timestamps, so the index cannot be dates-only.
        if isinstance(self.freq, Tick):
            delta = Timedelta(self.freq)

            if delta % dt.timedelta(days=1) != dt.timedelta(days=0):
                return False

        return self._values._is_dates_only

    def __reduce__(self):
        # Pickle support: round-trip through the module-level
        # _new_DatetimeIndex helper rather than the default __new__.
        d = {"data": self._data, "name": self.name}
        return _new_DatetimeIndex, (type(self), d), None

    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        """
        Can we compare values of the given dtype to our own?
        """
        if self.tz is not None:
            # If we have tz, we can compare to tzaware
            return isinstance(dtype, DatetimeTZDtype)
        # if we dont have tz, we can only compare to tznaive
        return lib.is_np_dtype(dtype, "M")

    # --------------------------------------------------------------------
    # Rendering Methods

    @cache_readonly
    def _formatter_func(self):
        # Note this is equivalent to the DatetimeIndexOpsMixin method but
        # uses the maybe-cached self._is_dates_only instead of re-computing it.
        from pandas.io.formats.format import get_format_datetime64

        formatter = get_format_datetime64(is_dates_only=self._is_dates_only)
        # Wrap each formatted timestamp in single quotes for repr output.
        return lambda x: f"'{formatter(x)}'"

    # --------------------------------------------------------------------
    # Set Operation Methods

    def _can_range_setop(self, other) -> bool:
        # GH 46702: If self or other have non-UTC tzs, DST transitions prevent
        # range representation due to 
no singular step\n if (\n self.tz is not None\n and not timezones.is_utc(self.tz)\n and not timezones.is_fixed_offset(self.tz)\n ):\n return False\n if (\n other.tz is not None\n and not timezones.is_utc(other.tz)\n and not timezones.is_fixed_offset(other.tz)\n ):\n return False\n return super()._can_range_setop(other)\n\n # --------------------------------------------------------------------\n\n def _get_time_micros(self) -> npt.NDArray[np.int64]:\n """\n Return the number of microseconds since midnight.\n\n Returns\n -------\n ndarray[int64_t]\n """\n values = self._data._local_timestamps()\n\n ppd = periods_per_day(self._data._creso)\n\n frac = values % ppd\n if self.unit == "ns":\n micros = frac // 1000\n elif self.unit == "us":\n micros = frac\n elif self.unit == "ms":\n micros = frac * 1000\n elif self.unit == "s":\n micros = frac * 1_000_000\n else: # pragma: no cover\n raise NotImplementedError(self.unit)\n\n micros[self._isnan] = -1\n return micros\n\n def snap(self, freq: Frequency = "S") -> DatetimeIndex:\n """\n Snap time stamps to nearest occurring frequency.\n\n Returns\n -------\n DatetimeIndex\n\n Examples\n --------\n >>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02',\n ... 
'2023-02-01', '2023-02-02'])\n >>> idx\n DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'],\n dtype='datetime64[ns]', freq=None)\n >>> idx.snap('MS')\n DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],\n dtype='datetime64[ns]', freq=None)\n """\n # Superdumb, punting on any optimizing\n freq = to_offset(freq)\n\n dta = self._data.copy()\n\n for i, v in enumerate(self):\n s = v\n if not freq.is_on_offset(s):\n t0 = freq.rollback(s)\n t1 = freq.rollforward(s)\n if abs(s - t0) < abs(t1 - s):\n s = t0\n else:\n s = t1\n dta[i] = s\n\n return DatetimeIndex._simple_new(dta, name=self.name)\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime):\n """\n Calculate datetime bounds for parsed time string and its resolution.\n\n Parameters\n ----------\n reso : Resolution\n Resolution provided by parsed string.\n parsed : datetime\n Datetime from parsed string.\n\n Returns\n -------\n lower, upper: pd.Timestamp\n """\n freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)\n per = Period(parsed, freq=freq)\n start, end = per.start_time, per.end_time\n\n # GH 24076\n # If an incoming date string contained a UTC offset, need to localize\n # the parsed date to this offset first before aligning with the index's\n # timezone\n start = start.tz_localize(parsed.tzinfo)\n end = end.tz_localize(parsed.tzinfo)\n\n if parsed.tzinfo is not None:\n if self.tz is None:\n raise ValueError(\n "The index must be timezone aware when indexing "\n "with a date string with a UTC offset"\n )\n # The flipped case with parsed.tz is None and self.tz is not None\n # is ruled out bc parsed and reso are produced by _parse_with_reso,\n # which localizes parsed.\n return start, end\n\n def _parse_with_reso(self, label: str):\n parsed, reso = super()._parse_with_reso(label)\n\n parsed = Timestamp(parsed)\n\n if self.tz is not None 
and parsed.tzinfo is None:\n # we special-case timezone-naive strings and timezone-aware\n # DatetimeIndex\n # https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081\n parsed = parsed.tz_localize(self.tz)\n\n return parsed, reso\n\n def _disallow_mismatched_indexing(self, key) -> None:\n """\n Check for mismatched-tzawareness indexing and re-raise as KeyError.\n """\n # we get here with isinstance(key, self._data._recognized_scalars)\n try:\n # GH#36148\n self._data._assert_tzawareness_compat(key)\n except TypeError as err:\n raise KeyError(key) from err\n\n def get_loc(self, key):\n """\n Get integer location for requested label\n\n Returns\n -------\n loc : int\n """\n self._check_indexing_error(key)\n\n orig_key = key\n if is_valid_na_for_dtype(key, self.dtype):\n key = NaT\n\n if isinstance(key, self._data._recognized_scalars):\n # needed to localize naive datetimes\n self._disallow_mismatched_indexing(key)\n key = Timestamp(key)\n\n elif isinstance(key, str):\n try:\n parsed, reso = self._parse_with_reso(key)\n except (ValueError, pytz.NonExistentTimeError) as err:\n raise KeyError(key) from err\n self._disallow_mismatched_indexing(parsed)\n\n if self._can_partial_date_slice(reso):\n try:\n return self._partial_date_slice(reso, parsed)\n except KeyError as err:\n raise KeyError(key) from err\n\n key = parsed\n\n elif isinstance(key, dt.timedelta):\n # GH#20464\n raise TypeError(\n f"Cannot index {type(self).__name__} with {type(key).__name__}"\n )\n\n elif isinstance(key, dt.time):\n return self.indexer_at_time(key)\n\n else:\n # unrecognized type\n raise KeyError(key)\n\n try:\n return Index.get_loc(self, key)\n except KeyError as err:\n raise KeyError(orig_key) from err\n\n @doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound)\n def _maybe_cast_slice_bound(self, label, side: str):\n # GH#42855 handle date here instead of get_slice_bound\n if isinstance(label, dt.date) and not isinstance(label, dt.datetime):\n # Pandas supports slicing with 
dates, treated as datetimes at midnight.\n # https://github.com/pandas-dev/pandas/issues/31501\n label = Timestamp(label).to_pydatetime()\n\n label = super()._maybe_cast_slice_bound(label, side)\n self._data._assert_tzawareness_compat(label)\n return Timestamp(label)\n\n def slice_indexer(self, start=None, end=None, step=None):\n """\n Return indexer for specified label slice.\n Index.slice_indexer, customized to handle time slicing.\n\n In addition to functionality provided by Index.slice_indexer, does the\n following:\n\n - if both `start` and `end` are instances of `datetime.time`, it\n invokes `indexer_between_time`\n - if `start` and `end` are both either string or None perform\n value-based selection in non-monotonic cases.\n\n """\n # For historical reasons DatetimeIndex supports slices between two\n # instances of datetime.time as if it were applying a slice mask to\n # an array of (self.hour, self.minute, self.seconds, self.microsecond).\n if isinstance(start, dt.time) and isinstance(end, dt.time):\n if step is not None and step != 1:\n raise ValueError("Must have step size of 1 with time slices")\n return self.indexer_between_time(start, end)\n\n if isinstance(start, dt.time) or isinstance(end, dt.time):\n raise KeyError("Cannot mix time and non-time slice keys")\n\n def check_str_or_none(point) -> bool:\n return point is not None and not isinstance(point, str)\n\n # GH#33146 if start and end are combinations of str and None and Index is not\n # monotonic, we can not use Index.slice_indexer because it does not honor the\n # actual elements, is only searching for start and end\n if (\n check_str_or_none(start)\n or check_str_or_none(end)\n or self.is_monotonic_increasing\n ):\n return Index.slice_indexer(self, start, end, step)\n\n mask = np.array(True)\n in_index = True\n if start is not None:\n start_casted = self._maybe_cast_slice_bound(start, "left")\n mask = start_casted <= self\n in_index &= (start_casted == self).any()\n\n if end is not None:\n 
end_casted = self._maybe_cast_slice_bound(end, "right")\n mask = (self <= end_casted) & mask\n in_index &= (end_casted == self).any()\n\n if not in_index:\n raise KeyError(\n "Value based partial slicing on non-monotonic DatetimeIndexes "\n "with non-existing keys is not allowed.",\n )\n indexer = mask.nonzero()[0][::step]\n if len(indexer) == len(self):\n return slice(None)\n else:\n return indexer\n\n # --------------------------------------------------------------------\n\n @property\n def inferred_type(self) -> str:\n # b/c datetime is represented as microseconds since the epoch, make\n # sure we can't have ambiguous indexing\n return "datetime64"\n\n def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:\n """\n Return index locations of values at particular time of day.\n\n Parameters\n ----------\n time : datetime.time or str\n Time passed in either as object (datetime.time) or as string in\n appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",\n "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").\n\n Returns\n -------\n np.ndarray[np.intp]\n\n See Also\n --------\n indexer_between_time : Get index locations of values between particular\n times of day.\n DataFrame.at_time : Select values at particular time of day.\n\n Examples\n --------\n >>> idx = pd.DatetimeIndex(["1/1/2020 10:00", "2/1/2020 11:00",\n ... 
"3/1/2020 10:00"])\n >>> idx.indexer_at_time("10:00")\n array([0, 2])\n """\n if asof:\n raise NotImplementedError("'asof' argument is not supported")\n\n if isinstance(time, str):\n from dateutil.parser import parse\n\n time = parse(time).time()\n\n if time.tzinfo:\n if self.tz is None:\n raise ValueError("Index must be timezone aware.")\n time_micros = self.tz_convert(time.tzinfo)._get_time_micros()\n else:\n time_micros = self._get_time_micros()\n micros = _time_to_micros(time)\n return (time_micros == micros).nonzero()[0]\n\n def indexer_between_time(\n self, start_time, end_time, include_start: bool = True, include_end: bool = True\n ) -> npt.NDArray[np.intp]:\n """\n Return index locations of values between particular times of day.\n\n Parameters\n ----------\n start_time, end_time : datetime.time, str\n Time passed either as object (datetime.time) or as string in\n appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",\n "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").\n include_start : bool, default True\n include_end : bool, default True\n\n Returns\n -------\n np.ndarray[np.intp]\n\n See Also\n --------\n indexer_at_time : Get index locations of values at particular time of day.\n DataFrame.between_time : Select values between particular times of day.\n\n Examples\n --------\n >>> idx = pd.date_range("2023-01-01", periods=4, freq="h")\n >>> idx\n DatetimeIndex(['2023-01-01 00:00:00', '2023-01-01 01:00:00',\n '2023-01-01 02:00:00', '2023-01-01 03:00:00'],\n dtype='datetime64[ns]', freq='h')\n >>> idx.indexer_between_time("00:00", "2:00", include_end=False)\n array([0, 1])\n """\n start_time = to_time(start_time)\n end_time = to_time(end_time)\n time_micros = self._get_time_micros()\n start_micros = _time_to_micros(start_time)\n end_micros = _time_to_micros(end_time)\n\n if include_start and include_end:\n lop = rop = operator.le\n elif include_start:\n lop = operator.le\n rop = operator.lt\n elif include_end:\n lop = operator.lt\n rop = operator.le\n 
else:\n lop = rop = operator.lt\n\n if start_time <= end_time:\n join_op = operator.and_\n else:\n join_op = operator.or_\n\n mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))\n\n return mask.nonzero()[0]\n\n\ndef date_range(\n start=None,\n end=None,\n periods=None,\n freq=None,\n tz=None,\n normalize: bool = False,\n name: Hashable | None = None,\n inclusive: IntervalClosedType = "both",\n *,\n unit: str | None = None,\n **kwargs,\n) -> DatetimeIndex:\n """\n Return a fixed frequency DatetimeIndex.\n\n Returns the range of equally spaced time points (where the difference between any\n two adjacent points is specified by the given frequency) such that they all\n satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp.,\n the first and last time points in that range that fall on the boundary of ``freq``\n (if given as a frequency string) or that are valid for ``freq`` (if given as a\n :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``,\n ``end``, or ``freq`` is *not* specified, this missing parameter can be computed\n given ``periods``, the number of timesteps in the range. See the note below.)\n\n Parameters\n ----------\n start : str or datetime-like, optional\n Left bound for generating dates.\n end : str or datetime-like, optional\n Right bound for generating dates.\n periods : int, optional\n Number of periods to generate.\n freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5h'. See\n :ref:`here <timeseries.offset_aliases>` for a list of\n frequency aliases.\n tz : str or tzinfo, optional\n Time zone name for returning localized DatetimeIndex, for example\n 'Asia/Hong_Kong'. 
By default, the resulting DatetimeIndex is
        timezone-naive unless timezone-aware datetime-likes are passed.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    inclusive : {"both", "neither", "left", "right"}, default "both"
        Include boundaries; Whether to set each bound as closed or open.

        .. versionadded:: 1.4.0
    unit : str, default None
        Specify the desired resolution of the result.

        .. versionadded:: 2.0.0
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex

    See Also
    --------
    DatetimeIndex : An immutable container for datetimes.
    timedelta_range : Return a fixed frequency TimedeltaIndex.
    period_range : Return a fixed frequency PeriodIndex.
    interval_range : Return a fixed frequency IntervalIndex.

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``DatetimeIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).

    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    **Specifying the values**

    The next four examples generate the same `DatetimeIndex`, but vary
    the combination of `start`, `end` and `periods`.

    Specify `start` and `end`, with the default daily frequency.

    >>> pd.date_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')

    Specify timezone-aware `start` and `end`, with the default daily frequency.

    >>> pd.date_range(
    ...     start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"),
    ...     end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"),
    ... )
    DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00',
                   '2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00',
                   '2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00',
                   '2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'],
                  dtype='datetime64[ns, Europe/Berlin]', freq='D')

    Specify `start` and `periods`, the number of periods (days).

    >>> pd.date_range(start='1/1/2018', periods=8)
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')

    Specify `end` and `periods`, the number of periods (days).

    >>> pd.date_range(end='1/1/2018', periods=8)
    DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
                   '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
                  dtype='datetime64[ns]', freq='D')

    Specify `start`, `end`, and `periods`; the frequency is generated
    automatically (linearly spaced).

    >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
    DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
                   '2018-04-27 00:00:00'],
                  dtype='datetime64[ns]', freq=None)

    **Other Parameters**

    Changed the `freq` (frequency) to ``'ME'`` (month end frequency).

    >>> pd.date_range(start='1/1/2018', periods=5, freq='ME')
    DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
                   '2018-05-31'],
                  dtype='datetime64[ns]', freq='ME')

    Multiples are allowed

    >>> pd.date_range(start='1/1/2018', periods=5, freq='3ME')
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3ME')

    `freq` can also be specified as an Offset object.

    >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3ME')

    Specify `tz` to set the timezone.

    >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
    DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
                   '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
                   '2018-01-05 00:00:00+09:00'],
                  dtype='datetime64[ns, Asia/Tokyo]', freq='D')

    `inclusive` controls whether to include `start` and `end` that are on the
    boundary. The default, "both", includes boundary points on either end.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both")
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')

    Use ``inclusive='left'`` to exclude `end` if it falls on the boundary.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left')
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
                  dtype='datetime64[ns]', freq='D')

    Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and
    similarly ``inclusive='neither'`` will exclude both `start` and `end`.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right')
    DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')

    **Specify a unit**

    >>> pd.date_range(start="2017-01-01", periods=10, freq="100YS", unit="s")
    DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01',
                   '2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01',
                   '2817-01-01', '2917-01-01'],
                  dtype='datetime64[s]', freq='100YS-JAN')
    """
    # Fall back to the documented default of daily frequency only when freq
    # cannot be inferred, i.e. when at least one of periods/start/end is also
    # missing.  If all three are given, freq stays None and the points are
    # linearly spaced between start and end instead.
    if freq is None and com.any_none(periods, start, end):
        freq = "D"

    # All validation and range generation is delegated to the array layer;
    # _simple_new then wraps the result into an index without re-validating.
    dtarr = DatetimeArray._generate_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        inclusive=inclusive,
        unit=unit,
        **kwargs,
    )
    return DatetimeIndex._simple_new(dtarr, name=name)


def bdate_range(
    start=None,
    end=None,
    periods: int | None = None,
    freq: Frequency | dt.timedelta = "B",
    tz=None,
    normalize: bool = True,
    name: Hashable | None = None,
    weekmask=None,
    holidays=None,
    inclusive: IntervalClosedType = "both",
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex with business day as the default.

    Parameters
    ----------
    start : str or datetime-like, default None
        Left bound for generating dates.
    end : str or datetime-like, default None
        Right bound for generating dates.
    periods : int, default None
        Number of periods to generate.
    freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B'
        Frequency strings can have multiples, e.g. '5h'. The default is
        business daily ('B').
    tz : str or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing.
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    weekmask : str or None, default None
        Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
        only used when custom frequency strings are passed. The default
        value None is equivalent to 'Mon Tue Wed Thu Fri'.
    holidays : list-like or None, default None
        Dates to exclude from the set of valid business days, passed to
        ``numpy.busdaycalendar``, only used when custom frequency strings
        are passed.
    inclusive : {"both", "neither", "left", "right"}, default "both"
        Include boundaries; Whether to set each bound as closed or open.

        .. versionadded:: 1.4.0
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex

    Notes
    -----
    Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. Specifying ``freq`` is a requirement
    for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not
    desired.

    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    Note how the two weekend days are skipped in the result.

    >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-08'],
                  dtype='datetime64[ns]', freq='B')
    """
    # Unlike date_range, a missing freq cannot be inferred here: business-day
    # semantics make no sense for a linearly spaced range.
    if freq is None:
        msg = "freq must be specified for bdate_range; use date_range instead"
        raise TypeError(msg)

    if isinstance(freq, str) and freq.startswith("C"):
        # Frequency strings starting with "C" select custom offsets from
        # prefix_mapping; only those offsets accept weekmask/holidays.
        try:
            weekmask = weekmask or "Mon Tue Wed Thu Fri"
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError) as err:
            # KeyError: unknown prefix; TypeError: offset rejects the kwargs.
            msg = f"invalid custom frequency string: {freq}"
            raise ValueError(msg) from err
    elif holidays or weekmask:
        # weekmask/holidays are silently meaningless for non-custom
        # frequencies, so reject them explicitly.
        msg = (
            "a custom frequency string is required when holidays or "
            f"weekmask are passed, got frequency {freq}"
        )
        raise ValueError(msg)

    # Delegate the actual generation to date_range with the (possibly
    # constructed) business-day offset.
    return date_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        name=name,
        inclusive=inclusive,
        **kwargs,
    )


def _time_to_micros(time_obj: dt.time) -> int:
    """
    Return the number of microseconds past midnight for ``time_obj``.

    Only the wall-clock fields (hour, minute, second, microsecond) are used;
    any tzinfo attached to ``time_obj`` is ignored.
    """
    seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
    return 1_000_000 * seconds + time_obj.microsecond
"""\nShared methods for Index subclasses backed by ExtensionArray.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n TypeVar,\n)\n\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.generic import ABCDataFrame\n\nfrom pandas.core.indexes.base import Index\n\nif TYPE_CHECKING:\n import numpy as np\n\n from pandas._typing import (\n ArrayLike,\n npt,\n )\n\n from pandas.core.arrays import IntervalArray\n from pandas.core.arrays._mixins import NDArrayBackedExtensionArray\n\n_ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex")\n\n\ndef _inherit_from_data(\n name: str, delegate: type, cache: bool = False, wrap: bool = False\n):\n """\n Make an alias for a method of the underlying ExtensionArray.\n\n Parameters\n ----------\n name : str\n Name of an attribute the class should inherit from its EA parent.\n delegate : class\n cache : bool, default False\n Whether to convert wrapped properties into cache_readonly\n wrap : bool, default False\n Whether to wrap the inherited result in an Index.\n\n Returns\n -------\n attribute, method, property, or cache_readonly\n """\n attr = getattr(delegate, name)\n\n if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":\n # getset_descriptor i.e. 
property defined in cython class\n if cache:\n\n def cached(self):\n return getattr(self._data, name)\n\n cached.__name__ = name\n cached.__doc__ = attr.__doc__\n method = cache_readonly(cached)\n\n else:\n\n def fget(self):\n result = getattr(self._data, name)\n if wrap:\n if isinstance(result, type(self._data)):\n return type(self)._simple_new(result, name=self.name)\n elif isinstance(result, ABCDataFrame):\n return result.set_index(self)\n return Index(result, name=self.name, dtype=result.dtype)\n return result\n\n def fset(self, value) -> None:\n setattr(self._data, name, value)\n\n fget.__name__ = name\n fget.__doc__ = attr.__doc__\n\n method = property(fget, fset)\n\n elif not callable(attr):\n # just a normal attribute, no wrapping\n method = attr\n\n else:\n # error: Incompatible redefinition (redefinition with type "Callable[[Any,\n # VarArg(Any), KwArg(Any)], Any]", original type "property")\n def method(self, *args, **kwargs): # type: ignore[misc]\n if "inplace" in kwargs:\n raise ValueError(f"cannot use inplace with {type(self).__name__}")\n result = attr(self._data, *args, **kwargs)\n if wrap:\n if isinstance(result, type(self._data)):\n return type(self)._simple_new(result, name=self.name)\n elif isinstance(result, ABCDataFrame):\n return result.set_index(self)\n return Index(result, name=self.name, dtype=result.dtype)\n return result\n\n # error: "property" has no attribute "__name__"\n method.__name__ = name # type: ignore[attr-defined]\n method.__doc__ = attr.__doc__\n return method\n\n\ndef inherit_names(\n names: list[str], delegate: type, cache: bool = False, wrap: bool = False\n) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]:\n """\n Class decorator to pin attributes from an ExtensionArray to a Index subclass.\n\n Parameters\n ----------\n names : List[str]\n delegate : class\n cache : bool, default False\n wrap : bool, default False\n Whether to wrap the inherited result in an Index.\n """\n\n def wrapper(cls: 
type[_ExtensionIndexT]) -> type[_ExtensionIndexT]:\n for name in names:\n meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap)\n setattr(cls, name, meth)\n\n return cls\n\n return wrapper\n\n\nclass ExtensionIndex(Index):\n """\n Index subclass for indexes backed by ExtensionArray.\n """\n\n # The base class already passes through to _data:\n # size, __len__, dtype\n\n _data: IntervalArray | NDArrayBackedExtensionArray\n\n # ---------------------------------------------------------------------\n\n def _validate_fill_value(self, value):\n """\n Convert value to be insertable to underlying array.\n """\n return self._data._validate_setitem_value(value)\n\n @cache_readonly\n def _isnan(self) -> npt.NDArray[np.bool_]:\n # error: Incompatible return value type (got "ExtensionArray", expected\n # "ndarray")\n return self._data.isna() # type: ignore[return-value]\n\n\nclass NDArrayBackedExtensionIndex(ExtensionIndex):\n """\n Index subclass for indexes backed by NDArrayBackedExtensionArray.\n """\n\n _data: NDArrayBackedExtensionArray\n\n def _get_engine_target(self) -> np.ndarray:\n return self._data._ndarray\n\n def _from_join_target(self, result: np.ndarray) -> ArrayLike:\n assert result.dtype == self._data._ndarray.dtype\n return self._data._from_backing_data(result)\n | .venv\Lib\site-packages\pandas\core\indexes\extension.py | extension.py | Python | 5,228 | 0.95 | 0.180233 | 0.076336 | python-kit | 810 | 2025-05-09T06:36:56.354041 | BSD-3-Clause | false | c7e38caf6de4ac1a4b321570879ebf95 |
"""\nfrozen (immutable) data structures to support MultiIndexing\n\nThese are used for:\n\n- .names (FrozenList)\n\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n NoReturn,\n)\n\nfrom pandas.core.base import PandasObject\n\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from pandas._typing import Self\n\n\nclass FrozenList(PandasObject, list):\n """\n Container that doesn't allow setting item *but*\n because it's technically hashable, will be used\n for lookups, appropriately, etc.\n """\n\n # Side note: This has to be of type list. Otherwise,\n # it messes up PyTables type checks.\n\n def union(self, other) -> FrozenList:\n """\n Returns a FrozenList with other concatenated to the end of self.\n\n Parameters\n ----------\n other : array-like\n The array-like whose elements we are concatenating.\n\n Returns\n -------\n FrozenList\n The collection difference between self and other.\n """\n if isinstance(other, tuple):\n other = list(other)\n return type(self)(super().__add__(other))\n\n def difference(self, other) -> FrozenList:\n """\n Returns a FrozenList with elements from other removed from self.\n\n Parameters\n ----------\n other : array-like\n The array-like whose elements we are removing self.\n\n Returns\n -------\n FrozenList\n The collection difference between self and other.\n """\n other = set(other)\n temp = [x for x in self if x not in other]\n return type(self)(temp)\n\n # TODO: Consider deprecating these in favor of `union` (xref gh-15506)\n # error: Incompatible types in assignment (expression has type\n # "Callable[[FrozenList, Any], FrozenList]", base class "list" defined the\n # type as overloaded function)\n __add__ = __iadd__ = union # type: ignore[assignment]\n\n def __getitem__(self, n):\n if isinstance(n, slice):\n return type(self)(super().__getitem__(n))\n return super().__getitem__(n)\n\n def __radd__(self, other) -> Self:\n if isinstance(other, tuple):\n other = 
list(other)\n return type(self)(other + list(self))\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, (tuple, FrozenList)):\n other = list(other)\n return super().__eq__(other)\n\n __req__ = __eq__\n\n def __mul__(self, other) -> Self:\n return type(self)(super().__mul__(other))\n\n __imul__ = __mul__\n\n def __reduce__(self):\n return type(self), (list(self),)\n\n # error: Signature of "__hash__" incompatible with supertype "list"\n def __hash__(self) -> int: # type: ignore[override]\n return hash(tuple(self))\n\n def _disabled(self, *args, **kwargs) -> NoReturn:\n """\n This method will not function because object is immutable.\n """\n raise TypeError(f"'{type(self).__name__}' does not support mutable operations.")\n\n def __str__(self) -> str:\n return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n"))\n\n def __repr__(self) -> str:\n return f"{type(self).__name__}({str(self)})"\n\n __setitem__ = __setslice__ = _disabled # type: ignore[assignment]\n __delitem__ = __delslice__ = _disabled\n pop = append = extend = _disabled\n remove = sort = insert = _disabled # type: ignore[assignment]\n | .venv\Lib\site-packages\pandas\core\indexes\frozen.py | frozen.py | Python | 3,482 | 0.95 | 0.2 | 0.076923 | vue-tools | 628 | 2024-07-23T04:07:47.065698 | BSD-3-Clause | false | 2e6daff9a4bf56183ff4d48a17935d2d |
""" define the IntervalIndex """\nfrom __future__ import annotations\n\nfrom operator import (\n le,\n lt,\n)\nimport textwrap\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n)\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas._libs.interval import (\n Interval,\n IntervalMixin,\n IntervalTree,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n Period,\n Timedelta,\n Timestamp,\n to_offset,\n)\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n)\nfrom pandas.util._exceptions import rewrite_exception\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n infer_dtype_from_scalar,\n maybe_box_datetimelike,\n maybe_downcast_numeric,\n maybe_upcast_numeric_to_64bit,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_number,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n DatetimeTZDtype,\n IntervalDtype,\n)\nfrom pandas.core.dtypes.missing import is_valid_na_for_dtype\n\nfrom pandas.core.algorithms import unique\nfrom pandas.core.arrays.datetimelike import validate_periods\nfrom pandas.core.arrays.interval import (\n IntervalArray,\n _interval_shared_docs,\n)\nimport pandas.core.common as com\nfrom pandas.core.indexers import is_valid_positional_slice\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n ensure_index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimes import (\n DatetimeIndex,\n date_range,\n)\nfrom pandas.core.indexes.extension import (\n ExtensionIndex,\n inherit_names,\n)\nfrom pandas.core.indexes.multi import MultiIndex\nfrom pandas.core.indexes.timedeltas import (\n TimedeltaIndex,\n timedelta_range,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import (\n Dtype,\n DtypeObj,\n 
IntervalClosedType,\n Self,\n npt,\n )\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n_index_doc_kwargs.update(\n {\n "klass": "IntervalIndex",\n "qualname": "IntervalIndex",\n "target_klass": "IntervalIndex or list of Intervals",\n "name": textwrap.dedent(\n """\\n name : object, optional\n Name to be stored in the index.\n """\n ),\n }\n)\n\n\ndef _get_next_label(label):\n # see test_slice_locs_with_ints_and_floats_succeeds\n dtype = getattr(label, "dtype", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = "datetime64[ns]"\n dtype = pandas_dtype(dtype)\n\n if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):\n return label + np.timedelta64(1, "ns")\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.inf)\n else:\n raise TypeError(f"cannot determine next label for type {repr(type(label))}")\n\n\ndef _get_prev_label(label):\n # see test_slice_locs_with_ints_and_floats_succeeds\n dtype = getattr(label, "dtype", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = "datetime64[ns]"\n dtype = pandas_dtype(dtype)\n\n if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):\n return label - np.timedelta64(1, "ns")\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.inf)\n else:\n raise TypeError(f"cannot determine next label for type {repr(type(label))}")\n\n\ndef _new_IntervalIndex(cls, d):\n """\n This is called upon unpickling, rather than the default which doesn't have\n arguments and breaks __new__.\n """\n return cls.from_arrays(**d)\n\n\n@Appender(\n _interval_shared_docs["class"]\n % {\n "klass": "IntervalIndex",\n "summary": "Immutable index of intervals that are closed on the same side.",\n "name": _index_doc_kwargs["name"],\n "extra_attributes": "is_overlapping\nvalues\n",\n "extra_methods": "",\n "examples": textwrap.dedent(\n """\\n Examples\n --------\n A new 
``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n dtype='interval[int64, right]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n """\n ),\n }\n)\n@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)\n@inherit_names(\n [\n "__array__",\n "overlaps",\n "contains",\n "closed_left",\n "closed_right",\n "open_left",\n "open_right",\n "is_empty",\n ],\n IntervalArray,\n)\n@inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True)\nclass IntervalIndex(ExtensionIndex):\n _typ = "intervalindex"\n\n # annotate properties pinned via inherit_names\n closed: IntervalClosedType\n is_non_overlapping_monotonic: bool\n closed_left: bool\n closed_right: bool\n open_left: bool\n open_right: bool\n\n _data: IntervalArray\n _values: IntervalArray\n _can_hold_strings = False\n _data_cls = IntervalArray\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data,\n closed: IntervalClosedType | None = None,\n dtype: Dtype | None = None,\n copy: bool = False,\n name: Hashable | None = None,\n verify_integrity: bool = True,\n ) -> Self:\n name = maybe_extract_name(name, data, cls)\n\n with rewrite_exception("IntervalArray", cls.__name__):\n array = IntervalArray(\n data,\n closed=closed,\n copy=copy,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n\n return cls._simple_new(array, name)\n\n @classmethod\n @Appender(\n _interval_shared_docs["from_breaks"]\n % {\n "klass": "IntervalIndex",\n "name": textwrap.dedent(\n """\n name : str, optional\n Name of the resulting IntervalIndex."""\n ),\n "examples": 
textwrap.dedent(\n """\\n Examples\n --------\n >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n dtype='interval[int64, right]')\n """\n ),\n }\n )\n def from_breaks(\n cls,\n breaks,\n closed: IntervalClosedType | None = "right",\n name: Hashable | None = None,\n copy: bool = False,\n dtype: Dtype | None = None,\n ) -> IntervalIndex:\n with rewrite_exception("IntervalArray", cls.__name__):\n array = IntervalArray.from_breaks(\n breaks, closed=closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs["from_arrays"]\n % {\n "klass": "IntervalIndex",\n "name": textwrap.dedent(\n """\n name : str, optional\n Name of the resulting IntervalIndex."""\n ),\n "examples": textwrap.dedent(\n """\\n Examples\n --------\n >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n dtype='interval[int64, right]')\n """\n ),\n }\n )\n def from_arrays(\n cls,\n left,\n right,\n closed: IntervalClosedType = "right",\n name: Hashable | None = None,\n copy: bool = False,\n dtype: Dtype | None = None,\n ) -> IntervalIndex:\n with rewrite_exception("IntervalArray", cls.__name__):\n array = IntervalArray.from_arrays(\n left, right, closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs["from_tuples"]\n % {\n "klass": "IntervalIndex",\n "name": textwrap.dedent(\n """\n name : str, optional\n Name of the resulting IntervalIndex."""\n ),\n "examples": textwrap.dedent(\n """\\n Examples\n --------\n >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])\n IntervalIndex([(0, 1], (1, 2]],\n dtype='interval[int64, right]')\n """\n ),\n }\n )\n def from_tuples(\n cls,\n data,\n closed: IntervalClosedType = "right",\n name: Hashable | None = None,\n copy: bool = False,\n dtype: Dtype | None = None,\n ) -> IntervalIndex:\n with rewrite_exception("IntervalArray", 
cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n # --------------------------------------------------------------------\n # error: Return type "IntervalTree" of "_engine" incompatible with return type\n # "Union[IndexEngine, ExtensionEngine]" in supertype "Index"\n @cache_readonly\n def _engine(self) -> IntervalTree: # type: ignore[override]\n # IntervalTree does not supports numpy array unless they are 64 bit\n left = self._maybe_convert_i8(self.left)\n left = maybe_upcast_numeric_to_64bit(left)\n right = self._maybe_convert_i8(self.right)\n right = maybe_upcast_numeric_to_64bit(right)\n return IntervalTree(left, right, closed=self.closed)\n\n def __contains__(self, key: Any) -> bool:\n """\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n bool\n """\n hash(key)\n if not isinstance(key, Interval):\n if is_valid_na_for_dtype(key, self.dtype):\n return self.hasnans\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n def _getitem_slice(self, slobj: slice) -> IntervalIndex:\n """\n Fastpath for __getitem__ when we know we have a slice.\n """\n res = self._data[slobj]\n return type(self)._simple_new(res, name=self._name)\n\n @cache_readonly\n def _multiindex(self) -> MultiIndex:\n return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])\n\n def __reduce__(self):\n d = {\n "left": self.left,\n "right": self.right,\n "closed": self.closed,\n "name": self.name,\n }\n return _new_IntervalIndex, (type(self), d), None\n\n @property\n def inferred_type(self) -> str:\n """Return a string of the type inferred from the values"""\n return "interval"\n\n # Cannot determine type of "memory_usage"\n @Appender(Index.memory_usage.__doc__) # type: ignore[has-type]\n def memory_usage(self, deep: bool = False) -> int:\n # we don't use an 
explicit engine\n # so return the bytes here\n return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)\n\n # IntervalTree doesn't have a is_monotonic_decreasing, so have to override\n # the Index implementation\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n """\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n """\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def is_unique(self) -> bool:\n """\n Return True if the IntervalIndex contains unique elements, else False.\n """\n left = self.left\n right = self.right\n\n if self.isna().sum() > 1:\n return False\n\n if left.is_unique or right.is_unique:\n return True\n\n seen_pairs = set()\n check_idx = np.where(left.duplicated(keep=False))[0]\n for idx in check_idx:\n pair = (left[idx], right[idx])\n if pair in seen_pairs:\n return False\n seen_pairs.add(pair)\n\n return True\n\n @property\n def is_overlapping(self) -> bool:\n """\n Return True if the IntervalIndex has overlapping intervals, else False.\n\n Two intervals overlap if they share a common point, including closed\n endpoints. 
Intervals that only have an open endpoint in common do not\n overlap.\n\n Returns\n -------\n bool\n Boolean indicating if the IntervalIndex has overlapping intervals.\n\n See Also\n --------\n Interval.overlaps : Check whether two Interval objects overlap.\n IntervalIndex.overlaps : Check an IntervalIndex elementwise for\n overlaps.\n\n Examples\n --------\n >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])\n >>> index\n IntervalIndex([(0, 2], (1, 3], (4, 5]],\n dtype='interval[int64, right]')\n >>> index.is_overlapping\n True\n\n Intervals that share closed endpoints overlap:\n\n >>> index = pd.interval_range(0, 3, closed='both')\n >>> index\n IntervalIndex([[0, 1], [1, 2], [2, 3]],\n dtype='interval[int64, both]')\n >>> index.is_overlapping\n True\n\n Intervals that only have an open endpoint in common do not overlap:\n\n >>> index = pd.interval_range(0, 3, closed='left')\n >>> index\n IntervalIndex([[0, 1), [1, 2), [2, 3)],\n dtype='interval[int64, left]')\n >>> index.is_overlapping\n False\n """\n # GH 23309\n return self._engine.is_overlapping\n\n def _needs_i8_conversion(self, key) -> bool:\n """\n Check if a given key needs i8 conversion. Conversion is necessary for\n Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An\n Interval-like requires conversion if its endpoints are one of the\n aforementioned types.\n\n Assumes that any list-like data has already been cast to an Index.\n\n Parameters\n ----------\n key : scalar or Index-like\n The key that should be checked for i8 conversion\n\n Returns\n -------\n bool\n """\n key_dtype = getattr(key, "dtype", None)\n if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):\n return self._needs_i8_conversion(key.left)\n\n i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)\n return isinstance(key, i8_types)\n\n def _maybe_convert_i8(self, key):\n """\n Maybe convert a given key to its equivalent i8 value(s). 
Used as a\n preprocessing step prior to IntervalTree queries (self._engine), which\n expects numeric data.\n\n Parameters\n ----------\n key : scalar or list-like\n The key that should maybe be converted to i8.\n\n Returns\n -------\n scalar or list-like\n The original key if no conversion occurred, int if converted scalar,\n Index with an int64 dtype if converted list-like.\n """\n if is_list_like(key):\n key = ensure_index(key)\n key = maybe_upcast_numeric_to_64bit(key)\n\n if not self._needs_i8_conversion(key):\n return key\n\n scalar = is_scalar(key)\n key_dtype = getattr(key, "dtype", None)\n if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):\n # convert left/right and reconstruct\n left = self._maybe_convert_i8(key.left)\n right = self._maybe_convert_i8(key.right)\n constructor = Interval if scalar else IntervalIndex.from_arrays\n # error: "object" not callable\n return constructor(\n left, right, closed=self.closed\n ) # type: ignore[operator]\n\n if scalar:\n # Timestamp/Timedelta\n key_dtype, key_i8 = infer_dtype_from_scalar(key)\n if isinstance(key, Period):\n key_i8 = key.ordinal\n elif isinstance(key_i8, Timestamp):\n key_i8 = key_i8._value\n elif isinstance(key_i8, (np.datetime64, np.timedelta64)):\n key_i8 = key_i8.view("i8")\n else:\n # DatetimeIndex/TimedeltaIndex\n key_dtype, key_i8 = key.dtype, Index(key.asi8)\n if key.hasnans:\n # convert NaT from its i8 value to np.nan so it's not viewed\n # as a valid value, maybe causing errors (e.g. 
is_overlapping)\n key_i8 = key_i8.where(~key._isnan)\n\n # ensure consistency with IntervalIndex subtype\n # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],\n # ExtensionDtype]" has no attribute "subtype"\n subtype = self.dtype.subtype # type: ignore[union-attr]\n\n if subtype != key_dtype:\n raise ValueError(\n f"Cannot index an IntervalIndex of subtype {subtype} with "\n f"values of dtype {key_dtype}"\n )\n\n return key_i8\n\n def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):\n if not self.is_non_overlapping_monotonic:\n raise KeyError(\n "can only get slices from an IntervalIndex if bounds are "\n "non-overlapping and all monotonic increasing or decreasing"\n )\n\n if isinstance(label, (IntervalMixin, IntervalIndex)):\n raise NotImplementedError("Interval objects are not currently supported")\n\n # GH 20921: "not is_monotonic_increasing" for the second condition\n # instead of "is_monotonic_decreasing" to account for single element\n # indexes being both increasing and decreasing\n if (side == "left" and self.left.is_monotonic_increasing) or (\n side == "right" and not self.left.is_monotonic_increasing\n ):\n sub_idx = self.right\n if self.open_right:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def get_loc(self, key) -> int | slice | np.ndarray:\n """\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n\n Returns\n -------\n int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply a point inside an interval.\n\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several 
intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])\n >>> overlapping_index.get_loc(0.5)\n array([ True, False, True])\n\n Only exact matches will be returned if an interval is provided.\n\n >>> index.get_loc(pd.Interval(0, 1))\n 0\n """\n self._check_indexing_error(key)\n\n if isinstance(key, Interval):\n if self.closed != key.closed:\n raise KeyError(key)\n mask = (self.left == key.left) & (self.right == key.right)\n elif is_valid_na_for_dtype(key, self.dtype):\n mask = self.isna()\n else:\n # assume scalar\n op_left = le if self.closed_left else lt\n op_right = le if self.closed_right else lt\n try:\n mask = op_left(self.left, key) & op_right(key, self.right)\n except TypeError as err:\n # scalar is not comparable to II subtype --> invalid label\n raise KeyError(key) from err\n\n matches = mask.sum()\n if matches == 0:\n raise KeyError(key)\n if matches == 1:\n return mask.argmax()\n\n res = lib.maybe_booleans_to_slice(mask.view("u1"))\n if isinstance(res, slice) and res.stop is None:\n # TODO: DO this in maybe_booleans_to_slice?\n res = slice(res.start, len(self), res.step)\n return res\n\n def _get_indexer(\n self,\n target: Index,\n method: str | None = None,\n limit: int | None = None,\n tolerance: Any | None = None,\n ) -> npt.NDArray[np.intp]:\n if isinstance(target, IntervalIndex):\n # We only get here with not self.is_overlapping\n # -> at most one match per interval in target\n # want exact matches -> need both left/right to match, so defer to\n # left/right get_indexer, compare elementwise, equality -> match\n indexer = self._get_indexer_unique_sides(target)\n\n elif not (is_object_dtype(target.dtype) or is_string_dtype(target.dtype)):\n # homogeneous scalar index: use IntervalTree\n # we should always have self._should_partial_index(target) here\n target = self._maybe_convert_i8(target)\n indexer = self._engine.get_indexer(target.values)\n else:\n # 
heterogeneous scalar index: defer elementwise to get_loc\n # we should always have self._should_partial_index(target) here\n return self._get_indexer_pointwise(target)[0]\n\n return ensure_platform_int(indexer)\n\n @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)\n def get_indexer_non_unique(\n self, target: Index\n ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n target = ensure_index(target)\n\n if not self._should_compare(target) and not self._should_partial_index(target):\n # e.g. IntervalIndex with different closed or incompatible subtype\n # -> no matches\n return self._get_indexer_non_comparable(target, None, unique=False)\n\n elif isinstance(target, IntervalIndex):\n if self.left.is_unique and self.right.is_unique:\n # fastpath available even if we don't have self._index_as_unique\n indexer = self._get_indexer_unique_sides(target)\n missing = (indexer == -1).nonzero()[0]\n else:\n return self._get_indexer_pointwise(target)\n\n elif is_object_dtype(target.dtype) or not self._should_partial_index(target):\n # target might contain intervals: defer elementwise to get_loc\n return self._get_indexer_pointwise(target)\n\n else:\n # Note: this case behaves differently from other Index subclasses\n # because IntervalIndex does partial-int indexing\n target = self._maybe_convert_i8(target)\n indexer, missing = self._engine.get_indexer_non_unique(target.values)\n\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]:\n """\n _get_indexer specialized to the case where both of our sides are unique.\n """\n # Caller is responsible for checking\n # `self.left.is_unique and self.right.is_unique`\n\n left_indexer = self.left.get_indexer(target.left)\n right_indexer = self.right.get_indexer(target.right)\n indexer = np.where(left_indexer == right_indexer, left_indexer, -1)\n return indexer\n\n def _get_indexer_pointwise(\n self, target: 
Index\n ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n """\n pointwise implementation for get_indexer and get_indexer_non_unique.\n """\n indexer, missing = [], []\n for i, key in enumerate(target):\n try:\n locs = self.get_loc(key)\n if isinstance(locs, slice):\n # Only needed for get_indexer_non_unique\n locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")\n elif lib.is_integer(locs):\n locs = np.array(locs, ndmin=1)\n else:\n # otherwise we have ndarray[bool]\n locs = np.where(locs)[0]\n except KeyError:\n missing.append(i)\n locs = np.array([-1])\n except InvalidIndexError:\n # i.e. non-scalar key e.g. a tuple.\n # see test_append_different_columns_types_raises\n missing.append(i)\n locs = np.array([-1])\n\n indexer.append(locs)\n\n indexer = np.concatenate(indexer)\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n @cache_readonly\n def _index_as_unique(self) -> bool:\n return not self.is_overlapping and self._engine._na_count < 2\n\n _requires_unique_msg = (\n "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique"\n )\n\n def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):\n if not (key.step is None or key.step == 1):\n # GH#31658 if label-based, we require step == 1,\n # if positional, we disallow float start/stop\n msg = "label-based slicing with step!=1 is not supported for IntervalIndex"\n if kind == "loc":\n raise ValueError(msg)\n if kind == "getitem":\n if not is_valid_positional_slice(key):\n # i.e. 
this cannot be interpreted as a positional slice\n raise ValueError(msg)\n\n return super()._convert_slice_indexer(key, kind)\n\n @cache_readonly\n def _should_fallback_to_positional(self) -> bool:\n # integer lookups in Series.__getitem__ are unambiguously\n # positional in this case\n # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],\n # ExtensionDtype]" has no attribute "subtype"\n return self.dtype.subtype.kind in "mM" # type: ignore[union-attr]\n\n def _maybe_cast_slice_bound(self, label, side: str):\n return getattr(self, side)._maybe_cast_slice_bound(label, side)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n if not isinstance(dtype, IntervalDtype):\n return False\n common_subtype = find_common_type([self.dtype, dtype])\n return not is_object_dtype(common_subtype)\n\n # --------------------------------------------------------------------\n\n @cache_readonly\n def left(self) -> Index:\n return Index(self._data.left, copy=False)\n\n @cache_readonly\n def right(self) -> Index:\n return Index(self._data.right, copy=False)\n\n @cache_readonly\n def mid(self) -> Index:\n return Index(self._data.mid, copy=False)\n\n @property\n def length(self) -> Index:\n return Index(self._data.length, copy=False)\n\n # --------------------------------------------------------------------\n # Set Operations\n\n def _intersection(self, other, sort):\n """\n intersection specialized to the case with matching dtypes.\n """\n # For IntervalIndex we also know other.closed == self.closed\n if self.left.is_unique and self.right.is_unique:\n taken = self._intersection_unique(other)\n elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:\n # Swap other/self if other is unique and self does not have\n # multiple NaNs\n taken = other._intersection_unique(self)\n else:\n # duplicates\n taken = self._intersection_non_unique(other)\n\n if sort is None:\n taken = taken.sort_values()\n\n return taken\n\n def _intersection_unique(self, 
other: IntervalIndex) -> IntervalIndex:\n """\n Used when the IntervalIndex does not have any common endpoint,\n no matter left or right.\n Return the intersection with another IntervalIndex.\n Parameters\n ----------\n other : IntervalIndex\n Returns\n -------\n IntervalIndex\n """\n # Note: this is much more performant than super()._intersection(other)\n lindexer = self.left.get_indexer(other.left)\n rindexer = self.right.get_indexer(other.right)\n\n match = (lindexer == rindexer) & (lindexer != -1)\n indexer = lindexer.take(match.nonzero()[0])\n indexer = unique(indexer)\n\n return self.take(indexer)\n\n def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:\n """\n Used when the IntervalIndex does have some common endpoints,\n on either sides.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n IntervalIndex\n """\n # Note: this is about 3.25x faster than super()._intersection(other)\n # in IntervalIndexMethod.time_intersection_both_duplicate(1000)\n mask = np.zeros(len(self), dtype=bool)\n\n if self.hasnans and other.hasnans:\n first_nan_loc = np.arange(len(self))[self.isna()][0]\n mask[first_nan_loc] = True\n\n other_tups = set(zip(other.left, other.right))\n for i, tup in enumerate(zip(self.left, self.right)):\n if tup in other_tups:\n mask[i] = True\n\n return self[mask]\n\n # --------------------------------------------------------------------\n\n def _get_engine_target(self) -> np.ndarray:\n # Note: we _could_ use libjoin functions by either casting to object\n # dtype or constructing tuples (faster than constructing Intervals)\n # but the libjoin fastpaths are no longer fast in these cases.\n raise NotImplementedError(\n "IntervalIndex does not use libjoin fastpaths or pass values to "\n "IndexEngine objects"\n )\n\n def _from_join_target(self, result):\n raise NotImplementedError("IntervalIndex does not use libjoin fastpaths")\n\n # TODO: arithmetic 
operations\n\n\ndef _is_valid_endpoint(endpoint) -> bool:\n """\n Helper for interval_range to check if start/end are valid types.\n """\n return any(\n [\n is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None,\n ]\n )\n\n\ndef _is_type_compatible(a, b) -> bool:\n """\n Helper for interval_range to check type compat of start/end/freq.\n """\n is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))\n return (\n (is_number(a) and is_number(b))\n or (is_ts_compat(a) and is_ts_compat(b))\n or (is_td_compat(a) and is_td_compat(b))\n or com.any_none(a, b)\n )\n\n\ndef interval_range(\n start=None,\n end=None,\n periods=None,\n freq=None,\n name: Hashable | None = None,\n closed: IntervalClosedType = "right",\n) -> IntervalIndex:\n """\n Return a fixed frequency IntervalIndex.\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals.\n end : numeric or datetime-like, default None\n Right bound for generating intervals.\n periods : int, default None\n Number of periods to generate.\n freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None\n The length of each interval. Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : str, default None\n Name of the resulting IntervalIndex.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n IntervalIndex\n\n See Also\n --------\n IntervalIndex : An Index of intervals that are all closed on the same side.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. 
If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n dtype='interval[int64, right]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00],\n (2017-01-02 00:00:00, 2017-01-03 00:00:00],\n (2017-01-03 00:00:00, 2017-01-04 00:00:00]],\n dtype='interval[datetime64[ns], right]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n dtype='interval[float64, right]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... 
periods=3, freq='MS')\n IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00],\n (2017-02-01 00:00:00, 2017-03-01 00:00:00],\n (2017-03-01 00:00:00, 2017-04-01 00:00:00]],\n dtype='interval[datetime64[ns], right]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n dtype='interval[float64, right]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],\n dtype='interval[int64, both]')\n """\n start = maybe_box_datetimelike(start)\n end = maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com.any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else "D"\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n "Of the four parameters: start, end, periods, and "\n "freq, exactly three must be specified"\n )\n\n if not _is_valid_endpoint(start):\n raise ValueError(f"start must be numeric or datetime-like, got {start}")\n if not _is_valid_endpoint(end):\n raise ValueError(f"end must be numeric or datetime-like, got {end}")\n\n periods = validate_periods(periods)\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError as err:\n raise ValueError(\n f"freq must be numeric or convertible to DateOffset, got {freq}"\n ) from err\n\n # verify type compatibility\n if not all(\n [\n _is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq),\n ]\n ):\n raise TypeError("start, end, freq need to be type compatible")\n\n # +1 to convert interval count to breaks count (n breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n breaks: 
np.ndarray | TimedeltaIndex | DatetimeIndex\n\n if is_number(endpoint):\n if com.all_not_none(start, end, freq):\n # 0.1 ensures we capture end\n breaks = np.arange(start, end + (freq * 0.1), freq)\n else:\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com.not_none(start, end, freq)):\n # np.linspace always produces float output\n\n # error: Argument 1 to "maybe_downcast_numeric" has incompatible type\n # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]";\n # expected "ndarray[Any, Any]" [\n breaks = maybe_downcast_numeric(\n breaks, # type: ignore[arg-type]\n np.dtype("int64"),\n )\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n breaks = date_range(start=start, end=end, periods=periods, freq=freq)\n else:\n breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq)\n\n return IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n | .venv\Lib\site-packages\pandas\core\indexes\interval.py | interval.py | Python | 38,246 | 0.95 | 0.135444 | 0.085391 | node-utils | 965 | 2023-12-27T14:31:13.086411 | Apache-2.0 | false | 4afef0a6ab48fb765e60ba681cab0280 |
from __future__ import annotations\n\nfrom datetime import (\n datetime,\n timedelta,\n)\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import index as libindex\nfrom pandas._libs.tslibs import (\n BaseOffset,\n NaT,\n Period,\n Resolution,\n Tick,\n)\nfrom pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import is_integer\nfrom pandas.core.dtypes.dtypes import PeriodDtype\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import is_valid_na_for_dtype\n\nfrom pandas.core.arrays.period import (\n PeriodArray,\n period_array,\n raise_on_incompatible,\n validate_dtype_freq,\n)\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import maybe_extract_name\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\nfrom pandas.core.indexes.datetimes import (\n DatetimeIndex,\n Index,\n)\nfrom pandas.core.indexes.extension import inherit_names\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import (\n Dtype,\n DtypeObj,\n Self,\n npt,\n )\n\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})\n_shared_doc_kwargs = {\n "klass": "PeriodArray",\n}\n\n# --- Period index sketch\n\n\ndef _new_PeriodIndex(cls, **d):\n # GH13277 for unpickling\n values = d.pop("data")\n if values.dtype == "int64":\n freq = d.pop("freq", None)\n dtype = PeriodDtype(freq)\n values = PeriodArray(values, dtype=dtype)\n return cls._simple_new(values, **d)\n else:\n return cls(values, **d)\n\n\n@inherit_names(\n ["strftime", "start_time", "end_time"] + PeriodArray._field_ops,\n PeriodArray,\n wrap=True,\n)\n@inherit_names(["is_leap_year"], PeriodArray)\nclass 
PeriodIndex(DatetimeIndexOpsMixin):\n """\n Immutable ndarray holding ordinal values indicating regular periods in time.\n\n Index keys are boxed to Period objects which carries the metadata (eg,\n frequency information).\n\n Parameters\n ----------\n data : array-like (1d int np.ndarray or PeriodArray), optional\n Optional period-like data to construct index with.\n copy : bool\n Make a copy of input ndarray.\n freq : str or period object, optional\n One of pandas period strings or corresponding objects.\n year : int, array, or Series, default None\n\n .. deprecated:: 2.2.0\n Use PeriodIndex.from_fields instead.\n month : int, array, or Series, default None\n\n .. deprecated:: 2.2.0\n Use PeriodIndex.from_fields instead.\n quarter : int, array, or Series, default None\n\n .. deprecated:: 2.2.0\n Use PeriodIndex.from_fields instead.\n day : int, array, or Series, default None\n\n .. deprecated:: 2.2.0\n Use PeriodIndex.from_fields instead.\n hour : int, array, or Series, default None\n\n .. deprecated:: 2.2.0\n Use PeriodIndex.from_fields instead.\n minute : int, array, or Series, default None\n\n .. deprecated:: 2.2.0\n Use PeriodIndex.from_fields instead.\n second : int, array, or Series, default None\n\n .. 
deprecated:: 2.2.0\n Use PeriodIndex.from_fields instead.\n dtype : str or PeriodDtype, default None\n\n Attributes\n ----------\n day\n dayofweek\n day_of_week\n dayofyear\n day_of_year\n days_in_month\n daysinmonth\n end_time\n freq\n freqstr\n hour\n is_leap_year\n minute\n month\n quarter\n qyear\n second\n start_time\n week\n weekday\n weekofyear\n year\n\n Methods\n -------\n asfreq\n strftime\n to_timestamp\n from_fields\n from_ordinals\n\n See Also\n --------\n Index : The base pandas Index type.\n Period : Represents a period of time.\n DatetimeIndex : Index with datetime64 data.\n TimedeltaIndex : Index of timedelta64 data.\n period_range : Create a fixed-frequency PeriodIndex.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex.from_fields(year=[2000, 2002], quarter=[1, 3])\n >>> idx\n PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')\n """\n\n _typ = "periodindex"\n\n _data: PeriodArray\n freq: BaseOffset\n dtype: PeriodDtype\n\n _data_cls = PeriodArray\n _supports_partial_string_indexing = True\n\n @property\n def _engine_type(self) -> type[libindex.PeriodEngine]:\n return libindex.PeriodEngine\n\n @cache_readonly\n def _resolution_obj(self) -> Resolution:\n # for compat with DatetimeIndex\n return self.dtype._resolution_obj\n\n # --------------------------------------------------------------------\n # methods that dispatch to array and wrap result in Index\n # These are defined here instead of via inherit_names for mypy\n\n @doc(\n PeriodArray.asfreq,\n other="pandas.arrays.PeriodArray",\n other_name="PeriodArray",\n **_shared_doc_kwargs,\n )\n def asfreq(self, freq=None, how: str = "E") -> Self:\n arr = self._data.asfreq(freq, how)\n return type(self)._simple_new(arr, name=self.name)\n\n @doc(PeriodArray.to_timestamp)\n def to_timestamp(self, freq=None, how: str = "start") -> DatetimeIndex:\n arr = self._data.to_timestamp(freq, how)\n return DatetimeIndex._simple_new(arr, name=self.name)\n\n @property\n @doc(PeriodArray.hour.fget)\n def 
hour(self) -> Index:\n return Index(self._data.hour, name=self.name)\n\n @property\n @doc(PeriodArray.minute.fget)\n def minute(self) -> Index:\n return Index(self._data.minute, name=self.name)\n\n @property\n @doc(PeriodArray.second.fget)\n def second(self) -> Index:\n return Index(self._data.second, name=self.name)\n\n # ------------------------------------------------------------------------\n # Index Constructors\n\n def __new__(\n cls,\n data=None,\n ordinal=None,\n freq=None,\n dtype: Dtype | None = None,\n copy: bool = False,\n name: Hashable | None = None,\n **fields,\n ) -> Self:\n valid_field_set = {\n "year",\n "month",\n "day",\n "quarter",\n "hour",\n "minute",\n "second",\n }\n\n refs = None\n if not copy and isinstance(data, (Index, ABCSeries)):\n refs = data._references\n\n if not set(fields).issubset(valid_field_set):\n argument = next(iter(set(fields) - valid_field_set))\n raise TypeError(f"__new__() got an unexpected keyword argument {argument}")\n elif len(fields):\n # GH#55960\n warnings.warn(\n "Constructing PeriodIndex from fields is deprecated. Use "\n "PeriodIndex.from_fields instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if ordinal is not None:\n # GH#55960\n warnings.warn(\n "The 'ordinal' keyword in PeriodIndex is deprecated and will "\n "be removed in a future version. 
Use PeriodIndex.from_ordinals "\n "instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n name = maybe_extract_name(name, data, cls)\n\n if data is None and ordinal is None:\n # range-based.\n if not fields:\n # test_pickle_compat_construction\n cls._raise_scalar_data_error(None)\n data = cls.from_fields(**fields, freq=freq)._data\n copy = False\n\n elif fields:\n if data is not None:\n raise ValueError("Cannot pass both data and fields")\n raise ValueError("Cannot pass both ordinal and fields")\n\n else:\n freq = validate_dtype_freq(dtype, freq)\n\n # PeriodIndex allow PeriodIndex(period_index, freq=different)\n # Let's not encourage that kind of behavior in PeriodArray.\n\n if freq and isinstance(data, cls) and data.freq != freq:\n # TODO: We can do some of these with no-copy / coercion?\n # e.g. D -> 2D seems to be OK\n data = data.asfreq(freq)\n\n if data is None and ordinal is not None:\n ordinal = np.asarray(ordinal, dtype=np.int64)\n dtype = PeriodDtype(freq)\n data = PeriodArray(ordinal, dtype=dtype)\n elif data is not None and ordinal is not None:\n raise ValueError("Cannot pass both data and ordinal")\n else:\n # don't pass copy here, since we copy later.\n data = period_array(data=data, freq=freq)\n\n if copy:\n data = data.copy()\n\n return cls._simple_new(data, name=name, refs=refs)\n\n @classmethod\n def from_fields(\n cls,\n *,\n year=None,\n quarter=None,\n month=None,\n day=None,\n hour=None,\n minute=None,\n second=None,\n freq=None,\n ) -> Self:\n fields = {\n "year": year,\n "quarter": quarter,\n "month": month,\n "day": day,\n "hour": hour,\n "minute": minute,\n "second": second,\n }\n fields = {key: value for key, value in fields.items() if value is not None}\n arr = PeriodArray._from_fields(fields=fields, freq=freq)\n return cls._simple_new(arr)\n\n @classmethod\n def from_ordinals(cls, ordinals, *, freq, name=None) -> Self:\n ordinals = np.asarray(ordinals, dtype=np.int64)\n dtype = PeriodDtype(freq)\n data = 
PeriodArray._simple_new(ordinals, dtype=dtype)\n return cls._simple_new(data, name=name)\n\n # ------------------------------------------------------------------------\n # Data\n\n @property\n def values(self) -> npt.NDArray[np.object_]:\n return np.asarray(self, dtype=object)\n\n def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]:\n """\n Convert timedelta-like input to an integer multiple of self.freq\n\n Parameters\n ----------\n other : timedelta, np.timedelta64, DateOffset, int, np.ndarray\n\n Returns\n -------\n converted : int, np.ndarray[int64]\n\n Raises\n ------\n IncompatibleFrequency : if the input cannot be written as a multiple\n of self.freq. Note IncompatibleFrequency subclasses ValueError.\n """\n if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):\n if isinstance(self.freq, Tick):\n # _check_timedeltalike_freq_compat will raise if incompatible\n delta = self._data._check_timedeltalike_freq_compat(other)\n return delta\n elif isinstance(other, BaseOffset):\n if other.base == self.freq.base:\n return other.n\n\n raise raise_on_incompatible(self, other)\n elif is_integer(other):\n assert isinstance(other, int)\n return other\n\n # raise when input doesn't have freq\n raise raise_on_incompatible(self, None)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n """\n Can we compare values of the given dtype to our own?\n """\n return self.dtype == dtype\n\n # ------------------------------------------------------------------------\n # Index Methods\n\n def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray:\n """\n where : array of timestamps\n mask : np.ndarray[bool]\n Array of booleans where data is not NA.\n """\n if isinstance(where, DatetimeIndex):\n where = PeriodIndex(where._values, freq=self.freq)\n elif not isinstance(where, PeriodIndex):\n raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")\n\n return super().asof_locs(where, mask)\n\n @property\n def 
is_full(self) -> bool:\n """\n Returns True if this PeriodIndex is range-like in that all Periods\n between start and end are present, in order.\n """\n if len(self) == 0:\n return True\n if not self.is_monotonic_increasing:\n raise ValueError("Index is not monotonic")\n values = self.asi8\n return bool(((values[1:] - values[:-1]) < 2).all())\n\n @property\n def inferred_type(self) -> str:\n # b/c data is represented as ints make sure we can't have ambiguous\n # indexing\n return "period"\n\n # ------------------------------------------------------------------------\n # Indexing Methods\n\n def _convert_tolerance(self, tolerance, target):\n # Returned tolerance must be in dtype/units so that\n # `|self._get_engine_target() - target._engine_target()| <= tolerance`\n # is meaningful. Since PeriodIndex returns int64 for engine_target,\n # we may need to convert timedelta64 tolerance to int64.\n tolerance = super()._convert_tolerance(tolerance, target)\n\n if self.dtype == target.dtype:\n # convert tolerance to i8\n tolerance = self._maybe_convert_timedelta(tolerance)\n\n return tolerance\n\n def get_loc(self, key):\n """\n Get integer location for requested label.\n\n Parameters\n ----------\n key : Period, NaT, str, or datetime\n String or datetime key must be parsable as Period.\n\n Returns\n -------\n loc : int or ndarray[int64]\n\n Raises\n ------\n KeyError\n Key is not present in the index.\n TypeError\n If key is listlike or otherwise not hashable.\n """\n orig_key = key\n\n self._check_indexing_error(key)\n\n if is_valid_na_for_dtype(key, self.dtype):\n key = NaT\n\n elif isinstance(key, str):\n try:\n parsed, reso = self._parse_with_reso(key)\n except ValueError as err:\n # A string with invalid format\n raise KeyError(f"Cannot interpret '{key}' as period") from err\n\n if self._can_partial_date_slice(reso):\n try:\n return self._partial_date_slice(reso, parsed)\n except KeyError as err:\n raise KeyError(key) from err\n\n if reso == self._resolution_obj:\n # 
the reso < self._resolution_obj case goes\n # through _get_string_slice\n key = self._cast_partial_indexing_scalar(parsed)\n else:\n raise KeyError(key)\n\n elif isinstance(key, Period):\n self._disallow_mismatched_indexing(key)\n\n elif isinstance(key, datetime):\n key = self._cast_partial_indexing_scalar(key)\n\n else:\n # in particular integer, which Period constructor would cast to string\n raise KeyError(key)\n\n try:\n return Index.get_loc(self, key)\n except KeyError as err:\n raise KeyError(orig_key) from err\n\n def _disallow_mismatched_indexing(self, key: Period) -> None:\n if key._dtype != self.dtype:\n raise KeyError(key)\n\n def _cast_partial_indexing_scalar(self, label: datetime) -> Period:\n try:\n period = Period(label, freq=self.freq)\n except ValueError as err:\n # we cannot construct the Period\n raise KeyError(label) from err\n return period\n\n @doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound)\n def _maybe_cast_slice_bound(self, label, side: str):\n if isinstance(label, datetime):\n label = self._cast_partial_indexing_scalar(label)\n\n return super()._maybe_cast_slice_bound(label, side)\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):\n freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)\n iv = Period(parsed, freq=freq)\n return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))\n\n @doc(DatetimeIndexOpsMixin.shift)\n def shift(self, periods: int = 1, freq=None) -> Self:\n if freq is not None:\n raise TypeError(\n f"`freq` argument is not supported for {type(self).__name__}.shift"\n )\n return self + periods\n\n\ndef period_range(\n start=None,\n end=None,\n periods: int | None = None,\n freq=None,\n name: Hashable | None = None,\n) -> PeriodIndex:\n """\n Return a fixed frequency PeriodIndex.\n\n The day (calendar) is the default frequency.\n\n Parameters\n ----------\n start : str, datetime, date, pandas.Timestamp, or period-like, default None\n Left bound for generating 
periods.\n end : str, datetime, date, pandas.Timestamp, or period-like, default None\n Right bound for generating periods.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, optional\n Frequency alias. By default the freq is taken from `start` or `end`\n if those are Period objects. Otherwise, the default is ``"D"`` for\n daily frequency.\n name : str, default None\n Name of the resulting PeriodIndex.\n\n Returns\n -------\n PeriodIndex\n\n Notes\n -----\n Of the three parameters: ``start``, ``end``, and ``periods``, exactly two\n must be specified.\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')\n PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',\n '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',\n '2018-01'],\n dtype='period[M]')\n\n If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor\n endpoints for a ``PeriodIndex`` with frequency matching that of the\n ``period_range`` constructor.\n\n >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),\n ... 
end=pd.Period('2017Q2', freq='Q'), freq='M')\n PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],\n dtype='period[M]')\n """\n if com.count_not_none(start, end, periods) != 2:\n raise ValueError(\n "Of the three parameters: start, end, and periods, "\n "exactly two must be specified"\n )\n if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):\n freq = "D"\n\n data, freq = PeriodArray._generate_range(start, end, periods, freq)\n dtype = PeriodDtype(freq)\n data = PeriodArray(data, dtype=dtype)\n return PeriodIndex(data, name=name)\n | .venv\Lib\site-packages\pandas\core\indexes\period.py | period.py | Python | 18,978 | 0.95 | 0.117264 | 0.078125 | python-kit | 736 | 2025-05-09T17:28:02.157037 | BSD-3-Clause | false | 33519d14b63bd7c85f0e5d844933e403 |
from __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Iterator,\n)\nfrom datetime import timedelta\nimport operator\nfrom sys import getsizeof\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n cast,\n overload,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n index as libindex,\n lib,\n)\nfrom pandas._libs.algos import unique_deltas\nfrom pandas._libs.lib import no_default\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (\n cache_readonly,\n deprecate_nonkeyword_arguments,\n doc,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n ensure_python_int,\n is_float,\n is_integer,\n is_scalar,\n is_signed_integer_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCTimedeltaIndex\n\nfrom pandas.core import ops\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n maybe_extract_name,\n)\nfrom pandas.core.ops.common import unpack_zerodim_and_defer\n\nif TYPE_CHECKING:\n from pandas._typing import (\n Axis,\n Dtype,\n NaPosition,\n Self,\n npt,\n )\n_empty_range = range(0)\n_dtype_int64 = np.dtype(np.int64)\n\n\nclass RangeIndex(Index):\n """\n Immutable Index implementing a monotonic integer range.\n\n RangeIndex is a memory-saving special case of an Index limited to representing\n monotonic ranges with a 64-bit dtype. 
Using RangeIndex may in some instances\n improve computing speed.\n\n This is the default index type used\n by DataFrame and Series when no explicit index is provided by the user.\n\n Parameters\n ----------\n start : int (default: 0), range, or other RangeIndex instance\n If int and "stop" is not given, interpreted as "stop" instead.\n stop : int (default: 0)\n step : int (default: 1)\n dtype : np.int64\n Unused, accepted for homogeneity with other index types.\n copy : bool, default False\n Unused, accepted for homogeneity with other index types.\n name : object, optional\n Name to be stored in the index.\n\n Attributes\n ----------\n start\n stop\n step\n\n Methods\n -------\n from_range\n\n See Also\n --------\n Index : The base pandas Index type.\n\n Examples\n --------\n >>> list(pd.RangeIndex(5))\n [0, 1, 2, 3, 4]\n\n >>> list(pd.RangeIndex(-2, 4))\n [-2, -1, 0, 1, 2, 3]\n\n >>> list(pd.RangeIndex(0, 10, 2))\n [0, 2, 4, 6, 8]\n\n >>> list(pd.RangeIndex(2, -10, -3))\n [2, -1, -4, -7]\n\n >>> list(pd.RangeIndex(0))\n []\n\n >>> list(pd.RangeIndex(1, 0))\n []\n """\n\n _typ = "rangeindex"\n _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")\n _range: range\n _values: np.ndarray\n\n @property\n def _engine_type(self) -> type[libindex.Int64Engine]:\n return libindex.Int64Engine\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n start=None,\n stop=None,\n step=None,\n dtype: Dtype | None = None,\n copy: bool = False,\n name: Hashable | None = None,\n ) -> Self:\n cls._validate_dtype(dtype)\n name = maybe_extract_name(name, start, cls)\n\n # RangeIndex\n if isinstance(start, cls):\n return start.copy(name=name)\n elif isinstance(start, range):\n return cls._simple_new(start, name=name)\n\n # validate the arguments\n if com.all_none(start, stop, step):\n raise TypeError("RangeIndex(...) 
must be called with integers")\n\n start = ensure_python_int(start) if start is not None else 0\n\n if stop is None:\n start, stop = 0, start\n else:\n stop = ensure_python_int(stop)\n\n step = ensure_python_int(step) if step is not None else 1\n if step == 0:\n raise ValueError("Step must not be zero")\n\n rng = range(start, stop, step)\n return cls._simple_new(rng, name=name)\n\n @classmethod\n def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self:\n """\n Create :class:`pandas.RangeIndex` from a ``range`` object.\n\n Returns\n -------\n RangeIndex\n\n Examples\n --------\n >>> pd.RangeIndex.from_range(range(5))\n RangeIndex(start=0, stop=5, step=1)\n\n >>> pd.RangeIndex.from_range(range(2, -10, -3))\n RangeIndex(start=2, stop=-10, step=-3)\n """\n if not isinstance(data, range):\n raise TypeError(\n f"{cls.__name__}(...) must be called with object coercible to a "\n f"range, {repr(data)} was passed"\n )\n cls._validate_dtype(dtype)\n return cls._simple_new(data, name=name)\n\n # error: Argument 1 of "_simple_new" is incompatible with supertype "Index";\n # supertype defines the argument type as\n # "Union[ExtensionArray, ndarray[Any, Any]]" [override]\n @classmethod\n def _simple_new( # type: ignore[override]\n cls, values: range, name: Hashable | None = None\n ) -> Self:\n result = object.__new__(cls)\n\n assert isinstance(values, range)\n\n result._range = values\n result._name = name\n result._cache = {}\n result._reset_identity()\n result._references = None\n return result\n\n @classmethod\n def _validate_dtype(cls, dtype: Dtype | None) -> None:\n if dtype is None:\n return\n\n validation_func, expected = cls._dtype_validation_metadata\n if not validation_func(dtype):\n raise ValueError(\n f"Incorrect `dtype` passed: expected {expected}, received {dtype}"\n )\n\n # --------------------------------------------------------------------\n\n # error: Return type "Type[Index]" of "_constructor" incompatible with return\n # type 
"Type[RangeIndex]" in supertype "Index"\n @cache_readonly\n def _constructor(self) -> type[Index]: # type: ignore[override]\n """return the class to use for construction"""\n return Index\n\n # error: Signature of "_data" incompatible with supertype "Index"\n @cache_readonly\n def _data(self) -> np.ndarray: # type: ignore[override]\n """\n An int array that for performance reasons is created only when needed.\n\n The constructed array is saved in ``_cache``.\n """\n return np.arange(self.start, self.stop, self.step, dtype=np.int64)\n\n def _get_data_as_items(self) -> list[tuple[str, int]]:\n """return a list of tuples of start, stop, step"""\n rng = self._range\n return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]\n\n def __reduce__(self):\n d = {"name": self._name}\n d.update(dict(self._get_data_as_items()))\n return ibase._new_Index, (type(self), d), None\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def _format_attrs(self):\n """\n Return a list of tuples of the (attr, formatted_value)\n """\n attrs = cast("list[tuple[str, str | int]]", self._get_data_as_items())\n if self._name is not None:\n attrs.append(("name", ibase.default_pprint(self._name)))\n return attrs\n\n def _format_with_header(self, *, header: list[str], na_rep: str) -> list[str]:\n # Equivalent to Index implementation, but faster\n if not len(self._range):\n return header\n first_val_str = str(self._range[0])\n last_val_str = str(self._range[-1])\n max_length = max(len(first_val_str), len(last_val_str))\n\n return header + [f"{x:<{max_length}}" for x in self._range]\n\n # --------------------------------------------------------------------\n\n @property\n def start(self) -> int:\n """\n The value of the `start` parameter (``0`` if this was not supplied).\n\n Examples\n --------\n >>> idx = pd.RangeIndex(5)\n >>> idx.start\n 0\n\n >>> idx = pd.RangeIndex(2, -10, -3)\n >>> idx.start\n 2\n """\n # GH 25710\n return 
self._range.start\n\n @property\n def stop(self) -> int:\n """\n The value of the `stop` parameter.\n\n Examples\n --------\n >>> idx = pd.RangeIndex(5)\n >>> idx.stop\n 5\n\n >>> idx = pd.RangeIndex(2, -10, -3)\n >>> idx.stop\n -10\n """\n return self._range.stop\n\n @property\n def step(self) -> int:\n """\n The value of the `step` parameter (``1`` if this was not supplied).\n\n Examples\n --------\n >>> idx = pd.RangeIndex(5)\n >>> idx.step\n 1\n\n >>> idx = pd.RangeIndex(2, -10, -3)\n >>> idx.step\n -3\n\n Even if :class:`pandas.RangeIndex` is empty, ``step`` is still ``1`` if\n not supplied.\n\n >>> idx = pd.RangeIndex(1, 0)\n >>> idx.step\n 1\n """\n # GH 25710\n return self._range.step\n\n @cache_readonly\n def nbytes(self) -> int:\n """\n Return the number of bytes in the underlying data.\n """\n rng = self._range\n return getsizeof(rng) + sum(\n getsizeof(getattr(rng, attr_name))\n for attr_name in ["start", "stop", "step"]\n )\n\n def memory_usage(self, deep: bool = False) -> int:\n """\n Memory usage of my values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False\n\n See Also\n --------\n numpy.ndarray.nbytes\n """\n return self.nbytes\n\n @property\n def dtype(self) -> np.dtype:\n return _dtype_int64\n\n @property\n def is_unique(self) -> bool:\n """return if the index has unique values"""\n return True\n\n @cache_readonly\n def is_monotonic_increasing(self) -> bool:\n return self._range.step > 0 or len(self) <= 1\n\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n return self._range.step < 0 or len(self) <= 1\n\n def __contains__(self, key: Any) -> bool:\n hash(key)\n try:\n key = ensure_python_int(key)\n except TypeError:\n return False\n return key in self._range\n\n @property\n def 
inferred_type(self) -> str:\n return "integer"\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n @doc(Index.get_loc)\n def get_loc(self, key) -> int:\n if is_integer(key) or (is_float(key) and key.is_integer()):\n new_key = int(key)\n try:\n return self._range.index(new_key)\n except ValueError as err:\n raise KeyError(key) from err\n if isinstance(key, Hashable):\n raise KeyError(key)\n self._check_indexing_error(key)\n raise KeyError(key)\n\n def _get_indexer(\n self,\n target: Index,\n method: str | None = None,\n limit: int | None = None,\n tolerance=None,\n ) -> npt.NDArray[np.intp]:\n if com.any_not_none(method, tolerance, limit):\n return super()._get_indexer(\n target, method=method, tolerance=tolerance, limit=limit\n )\n\n if self.step > 0:\n start, stop, step = self.start, self.stop, self.step\n else:\n # GH 28678: work on reversed range for simplicity\n reverse = self._range[::-1]\n start, stop, step = reverse.start, reverse.stop, reverse.step\n\n target_array = np.asarray(target)\n locs = target_array - start\n valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)\n locs[~valid] = -1\n locs[valid] = locs[valid] / step\n\n if step != self.step:\n # We reversed this range: transform to original locs\n locs[valid] = len(self) - 1 - locs[valid]\n return ensure_platform_int(locs)\n\n @cache_readonly\n def _should_fallback_to_positional(self) -> bool:\n """\n Should an integer key be treated as positional?\n """\n return False\n\n # --------------------------------------------------------------------\n\n def tolist(self) -> list[int]:\n return list(self._range)\n\n @doc(Index.__iter__)\n def __iter__(self) -> Iterator[int]:\n yield from self._range\n\n @doc(Index._shallow_copy)\n def _shallow_copy(self, values, name: Hashable = no_default):\n name = self._name if name is no_default else name\n\n if values.dtype.kind == "f":\n return Index(values, name=name, dtype=np.float64)\n # GH 46675 & 43885: If 
values is equally spaced, return a\n # more memory-compact RangeIndex instead of Index with 64-bit dtype\n unique_diffs = unique_deltas(values)\n if len(unique_diffs) == 1 and unique_diffs[0] != 0:\n diff = unique_diffs[0]\n new_range = range(values[0], values[-1] + diff, diff)\n return type(self)._simple_new(new_range, name=name)\n else:\n return self._constructor._simple_new(values, name=name)\n\n def _view(self) -> Self:\n result = type(self)._simple_new(self._range, name=self._name)\n result._cache = self._cache\n return result\n\n @doc(Index.copy)\n def copy(self, name: Hashable | None = None, deep: bool = False) -> Self:\n name = self._validate_names(name=name, deep=deep)[0]\n new_index = self._rename(name=name)\n return new_index\n\n def _minmax(self, meth: str):\n no_steps = len(self) - 1\n if no_steps == -1:\n return np.nan\n elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):\n return self.start\n\n return self.start + self.step * no_steps\n\n def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:\n """The minimum value of the RangeIndex"""\n nv.validate_minmax_axis(axis)\n nv.validate_min(args, kwargs)\n return self._minmax("min")\n\n def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:\n """The maximum value of the RangeIndex"""\n nv.validate_minmax_axis(axis)\n nv.validate_max(args, kwargs)\n return self._minmax("max")\n\n def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:\n """\n Returns the indices that would sort the index and its\n underlying data.\n\n Returns\n -------\n np.ndarray[np.intp]\n\n See Also\n --------\n numpy.ndarray.argsort\n """\n ascending = kwargs.pop("ascending", True) # EA compat\n kwargs.pop("kind", None) # e.g. 
"mergesort" is irrelevant\n nv.validate_argsort(args, kwargs)\n\n if self._range.step > 0:\n result = np.arange(len(self), dtype=np.intp)\n else:\n result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)\n\n if not ascending:\n result = result[::-1]\n return result\n\n def factorize(\n self,\n sort: bool = False,\n use_na_sentinel: bool = True,\n ) -> tuple[npt.NDArray[np.intp], RangeIndex]:\n codes = np.arange(len(self), dtype=np.intp)\n uniques = self\n if sort and self.step < 0:\n codes = codes[::-1]\n uniques = uniques[::-1]\n return codes, uniques\n\n def equals(self, other: object) -> bool:\n """\n Determines if two Index objects contain the same elements.\n """\n if isinstance(other, RangeIndex):\n return self._range == other._range\n return super().equals(other)\n\n # error: Signature of "sort_values" incompatible with supertype "Index"\n @overload # type: ignore[override]\n def sort_values(\n self,\n *,\n return_indexer: Literal[False] = ...,\n ascending: bool = ...,\n na_position: NaPosition = ...,\n key: Callable | None = ...,\n ) -> Self:\n ...\n\n @overload\n def sort_values(\n self,\n *,\n return_indexer: Literal[True],\n ascending: bool = ...,\n na_position: NaPosition = ...,\n key: Callable | None = ...,\n ) -> tuple[Self, np.ndarray | RangeIndex]:\n ...\n\n @overload\n def sort_values(\n self,\n *,\n return_indexer: bool = ...,\n ascending: bool = ...,\n na_position: NaPosition = ...,\n key: Callable | None = ...,\n ) -> Self | tuple[Self, np.ndarray | RangeIndex]:\n ...\n\n @deprecate_nonkeyword_arguments(\n version="3.0", allowed_args=["self"], name="sort_values"\n )\n def sort_values(\n self,\n return_indexer: bool = False,\n ascending: bool = True,\n na_position: NaPosition = "last",\n key: Callable | None = None,\n ) -> Self | tuple[Self, np.ndarray | RangeIndex]:\n if key is not None:\n return super().sort_values(\n return_indexer=return_indexer,\n ascending=ascending,\n na_position=na_position,\n key=key,\n )\n else:\n sorted_index = self\n 
inverse_indexer = False\n if ascending:\n if self.step < 0:\n sorted_index = self[::-1]\n inverse_indexer = True\n else:\n if self.step > 0:\n sorted_index = self[::-1]\n inverse_indexer = True\n\n if return_indexer:\n if inverse_indexer:\n rng = range(len(self) - 1, -1, -1)\n else:\n rng = range(len(self))\n return sorted_index, RangeIndex(rng)\n else:\n return sorted_index\n\n # --------------------------------------------------------------------\n # Set Operations\n\n def _intersection(self, other: Index, sort: bool = False):\n # caller is responsible for checking self and other are both non-empty\n\n if not isinstance(other, RangeIndex):\n return super()._intersection(other, sort=sort)\n\n first = self._range[::-1] if self.step < 0 else self._range\n second = other._range[::-1] if other.step < 0 else other._range\n\n # check whether intervals intersect\n # deals with in- and decreasing ranges\n int_low = max(first.start, second.start)\n int_high = min(first.stop, second.stop)\n if int_high <= int_low:\n return self._simple_new(_empty_range)\n\n # Method hint: linear Diophantine equation\n # solve intersection problem\n # performance hint: for identical step sizes, could use\n # cheaper alternative\n gcd, s, _ = self._extended_gcd(first.step, second.step)\n\n # check whether element sets intersect\n if (first.start - second.start) % gcd:\n return self._simple_new(_empty_range)\n\n # calculate parameters for the RangeIndex describing the\n # intersection disregarding the lower bounds\n tmp_start = first.start + (second.start - first.start) * first.step // gcd * s\n new_step = first.step * second.step // gcd\n new_range = range(tmp_start, int_high, new_step)\n new_index = self._simple_new(new_range)\n\n # adjust index to limiting interval\n new_start = new_index._min_fitting_element(int_low)\n new_range = range(new_start, new_index.stop, new_index.step)\n new_index = self._simple_new(new_range)\n\n if (self.step < 0 and other.step < 0) is not (new_index.step < 
0):\n new_index = new_index[::-1]\n\n if sort is None:\n new_index = new_index.sort_values()\n\n return new_index\n\n def _min_fitting_element(self, lower_limit: int) -> int:\n """Returns the smallest element greater than or equal to the limit"""\n no_steps = -(-(lower_limit - self.start) // abs(self.step))\n return self.start + abs(self.step) * no_steps\n\n def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:\n """\n Extended Euclidean algorithms to solve Bezout's identity:\n a*x + b*y = gcd(x, y)\n Finds one particular solution for x, y: s, t\n Returns: gcd, s, t\n """\n s, old_s = 0, 1\n t, old_t = 1, 0\n r, old_r = b, a\n while r:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n old_t, t = t, old_t - quotient * t\n return old_r, old_s, old_t\n\n def _range_in_self(self, other: range) -> bool:\n """Check if other range is contained in self"""\n # https://stackoverflow.com/a/32481015\n if not other:\n return True\n if not self._range:\n return False\n if len(other) > 1 and other.step % self._range.step:\n return False\n return other.start in self._range and other[-1] in self._range\n\n def _union(self, other: Index, sort: bool | None):\n """\n Form the union of two Index objects and sorts if possible\n\n Parameters\n ----------\n other : Index or array-like\n\n sort : bool or None, default None\n Whether to sort (monotonically increasing) the resulting index.\n ``sort=None|True`` returns a ``RangeIndex`` if possible or a sorted\n ``Index`` with a int64 dtype if not.\n ``sort=False`` can return a ``RangeIndex`` if self is monotonically\n increasing and other is fully contained in self. 
Otherwise, returns\n an unsorted ``Index`` with an int64 dtype.\n\n Returns\n -------\n union : Index\n """\n if isinstance(other, RangeIndex):\n if sort in (None, True) or (\n sort is False and self.step > 0 and self._range_in_self(other._range)\n ):\n # GH 47557: Can still return a RangeIndex\n # if other range in self and sort=False\n start_s, step_s = self.start, self.step\n end_s = self.start + self.step * (len(self) - 1)\n start_o, step_o = other.start, other.step\n end_o = other.start + other.step * (len(other) - 1)\n if self.step < 0:\n start_s, step_s, end_s = end_s, -step_s, start_s\n if other.step < 0:\n start_o, step_o, end_o = end_o, -step_o, start_o\n if len(self) == 1 and len(other) == 1:\n step_s = step_o = abs(self.start - other.start)\n elif len(self) == 1:\n step_s = step_o\n elif len(other) == 1:\n step_o = step_s\n start_r = min(start_s, start_o)\n end_r = max(end_s, end_o)\n if step_o == step_s:\n if (\n (start_s - start_o) % step_s == 0\n and (start_s - end_o) <= step_s\n and (start_o - end_s) <= step_s\n ):\n return type(self)(start_r, end_r + step_s, step_s)\n if (\n (step_s % 2 == 0)\n and (abs(start_s - start_o) == step_s / 2)\n and (abs(end_s - end_o) == step_s / 2)\n ):\n # e.g. 
range(0, 10, 2) and range(1, 11, 2)\n # but not range(0, 20, 4) and range(1, 21, 4) GH#44019\n return type(self)(start_r, end_r + step_s / 2, step_s / 2)\n\n elif step_o % step_s == 0:\n if (\n (start_o - start_s) % step_s == 0\n and (start_o + step_s >= start_s)\n and (end_o - step_s <= end_s)\n ):\n return type(self)(start_r, end_r + step_s, step_s)\n elif step_s % step_o == 0:\n if (\n (start_s - start_o) % step_o == 0\n and (start_s + step_o >= start_o)\n and (end_s - step_o <= end_o)\n ):\n return type(self)(start_r, end_r + step_o, step_o)\n\n return super()._union(other, sort=sort)\n\n def _difference(self, other, sort=None):\n # optimized set operation if we have another RangeIndex\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if not isinstance(other, RangeIndex):\n return super()._difference(other, sort=sort)\n\n if sort is not False and self.step < 0:\n return self[::-1]._difference(other)\n\n res_name = ops.get_op_result_name(self, other)\n\n first = self._range[::-1] if self.step < 0 else self._range\n overlap = self.intersection(other)\n if overlap.step < 0:\n overlap = overlap[::-1]\n\n if len(overlap) == 0:\n return self.rename(name=res_name)\n if len(overlap) == len(self):\n return self[:0].rename(res_name)\n\n # overlap.step will always be a multiple of self.step (see _intersection)\n\n if len(overlap) == 1:\n if overlap[0] == self[0]:\n return self[1:]\n\n elif overlap[0] == self[-1]:\n return self[:-1]\n\n elif len(self) == 3 and overlap[0] == self[1]:\n return self[::2]\n\n else:\n return super()._difference(other, sort=sort)\n\n elif len(overlap) == 2 and overlap[0] == first[0] and overlap[-1] == first[-1]:\n # e.g. 
range(-8, 20, 7) and range(13, -9, -3)\n return self[1:-1]\n\n if overlap.step == first.step:\n if overlap[0] == first.start:\n # The difference is everything after the intersection\n new_rng = range(overlap[-1] + first.step, first.stop, first.step)\n elif overlap[-1] == first[-1]:\n # The difference is everything before the intersection\n new_rng = range(first.start, overlap[0], first.step)\n elif overlap._range == first[1:-1]:\n # e.g. range(4) and range(1, 3)\n step = len(first) - 1\n new_rng = first[::step]\n else:\n # The difference is not range-like\n # e.g. range(1, 10, 1) and range(3, 7, 1)\n return super()._difference(other, sort=sort)\n\n else:\n # We must have len(self) > 1, bc we ruled out above\n # len(overlap) == 0 and len(overlap) == len(self)\n assert len(self) > 1\n\n if overlap.step == first.step * 2:\n if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]):\n # e.g. range(1, 10, 1) and range(1, 10, 2)\n new_rng = first[1::2]\n\n elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]):\n # e.g. range(1, 10, 1) and range(2, 10, 2)\n new_rng = first[::2]\n\n else:\n # We can get here with e.g. range(20) and range(0, 10, 2)\n return super()._difference(other, sort=sort)\n\n else:\n # e.g. 
range(10) and range(0, 10, 3)\n return super()._difference(other, sort=sort)\n\n new_index = type(self)._simple_new(new_rng, name=res_name)\n if first is not self._range:\n new_index = new_index[::-1]\n\n return new_index\n\n def symmetric_difference(\n self, other, result_name: Hashable | None = None, sort=None\n ):\n if not isinstance(other, RangeIndex) or sort is not None:\n return super().symmetric_difference(other, result_name, sort)\n\n left = self.difference(other)\n right = other.difference(self)\n result = left.union(right)\n\n if result_name is not None:\n result = result.rename(result_name)\n return result\n\n # --------------------------------------------------------------------\n\n # error: Return type "Index" of "delete" incompatible with return type\n # "RangeIndex" in supertype "Index"\n def delete(self, loc) -> Index: # type: ignore[override]\n # In some cases we can retain RangeIndex, see also\n # DatetimeTimedeltaMixin._get_delete_Freq\n if is_integer(loc):\n if loc in (0, -len(self)):\n return self[1:]\n if loc in (-1, len(self) - 1):\n return self[:-1]\n if len(self) == 3 and loc in (1, -2):\n return self[::2]\n\n elif lib.is_list_like(loc):\n slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))\n\n if isinstance(slc, slice):\n # defer to RangeIndex._difference, which is optimized to return\n # a RangeIndex whenever possible\n other = self[slc]\n return self.difference(other, sort=False)\n\n return super().delete(loc)\n\n def insert(self, loc: int, item) -> Index:\n if len(self) and (is_integer(item) or is_float(item)):\n # We can retain RangeIndex is inserting at the beginning or end,\n # or right in the middle.\n rng = self._range\n if loc == 0 and item == self[0] - self.step:\n new_rng = range(rng.start - rng.step, rng.stop, rng.step)\n return type(self)._simple_new(new_rng, name=self._name)\n\n elif loc == len(self) and item == self[-1] + self.step:\n new_rng = range(rng.start, rng.stop + rng.step, rng.step)\n return 
type(self)._simple_new(new_rng, name=self._name)\n\n elif len(self) == 2 and item == self[0] + self.step / 2:\n # e.g. inserting 1 into [0, 2]\n step = int(self.step / 2)\n new_rng = range(self.start, self.stop, step)\n return type(self)._simple_new(new_rng, name=self._name)\n\n return super().insert(loc, item)\n\n def _concat(self, indexes: list[Index], name: Hashable) -> Index:\n """\n Overriding parent method for the case of all RangeIndex instances.\n\n When all members of "indexes" are of type RangeIndex: result will be\n RangeIndex if possible, Index with a int64 dtype otherwise. E.g.:\n indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)\n indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Index([0,1,2,4,5], dtype='int64')\n """\n if not all(isinstance(x, RangeIndex) for x in indexes):\n return super()._concat(indexes, name)\n\n elif len(indexes) == 1:\n return indexes[0]\n\n rng_indexes = cast(list[RangeIndex], indexes)\n\n start = step = next_ = None\n\n # Filter the empty indexes\n non_empty_indexes = [obj for obj in rng_indexes if len(obj)]\n\n for obj in non_empty_indexes:\n rng = obj._range\n\n if start is None:\n # This is set by the first non-empty index\n start = rng.start\n if step is None and len(rng) > 1:\n step = rng.step\n elif step is None:\n # First non-empty index had only one element\n if rng.start == start:\n values = np.concatenate([x._values for x in rng_indexes])\n result = self._constructor(values)\n return result.rename(name)\n\n step = rng.start - start\n\n non_consecutive = (step != rng.step and len(rng) > 1) or (\n next_ is not None and rng.start != next_\n )\n if non_consecutive:\n result = self._constructor(\n np.concatenate([x._values for x in rng_indexes])\n )\n return result.rename(name)\n\n if step is not None:\n next_ = rng[-1] + step\n\n if non_empty_indexes:\n # Get the stop value from "next" or alternatively\n # from the last non-empty index\n stop = non_empty_indexes[-1].stop if next_ is None else next_\n return 
RangeIndex(start, stop, step).rename(name)\n\n # Here all "indexes" had 0 length, i.e. were empty.\n # In this case return an empty range index.\n return RangeIndex(0, 0).rename(name)\n\n def __len__(self) -> int:\n """\n return the length of the RangeIndex\n """\n return len(self._range)\n\n @property\n def size(self) -> int:\n return len(self)\n\n def __getitem__(self, key):\n """\n Conserve RangeIndex type for scalar and slice keys.\n """\n if isinstance(key, slice):\n return self._getitem_slice(key)\n elif is_integer(key):\n new_key = int(key)\n try:\n return self._range[new_key]\n except IndexError as err:\n raise IndexError(\n f"index {key} is out of bounds for axis 0 with size {len(self)}"\n ) from err\n elif is_scalar(key):\n raise IndexError(\n "only integers, slices (`:`), "\n "ellipsis (`...`), numpy.newaxis (`None`) "\n "and integer or boolean "\n "arrays are valid indices"\n )\n return super().__getitem__(key)\n\n def _getitem_slice(self, slobj: slice) -> Self:\n """\n Fastpath for __getitem__ when we know we have a slice.\n """\n res = self._range[slobj]\n return type(self)._simple_new(res, name=self._name)\n\n @unpack_zerodim_and_defer("__floordiv__")\n def __floordiv__(self, other):\n if is_integer(other) and other != 0:\n if len(self) == 0 or self.start % other == 0 and self.step % other == 0:\n start = self.start // other\n step = self.step // other\n stop = start + len(self) * step\n new_range = range(start, stop, step or 1)\n return self._simple_new(new_range, name=self._name)\n if len(self) == 1:\n start = self.start // other\n new_range = range(start, start + 1, 1)\n return self._simple_new(new_range, name=self._name)\n\n return super().__floordiv__(other)\n\n # --------------------------------------------------------------------\n # Reductions\n\n def all(self, *args, **kwargs) -> bool:\n return 0 not in self._range\n\n def any(self, *args, **kwargs) -> bool:\n return any(self._range)\n\n # 
--------------------------------------------------------------------\n\n def _cmp_method(self, other, op):\n if isinstance(other, RangeIndex) and self._range == other._range:\n # Both are immutable so if ._range attr. are equal, shortcut is possible\n return super()._cmp_method(self, op)\n return super()._cmp_method(other, op)\n\n def _arith_method(self, other, op):\n """\n Parameters\n ----------\n other : Any\n op : callable that accepts 2 params\n perform the binary op\n """\n\n if isinstance(other, ABCTimedeltaIndex):\n # Defer to TimedeltaIndex implementation\n return NotImplemented\n elif isinstance(other, (timedelta, np.timedelta64)):\n # GH#19333 is_integer evaluated True on timedelta64,\n # so we need to catch these explicitly\n return super()._arith_method(other, op)\n elif lib.is_np_dtype(getattr(other, "dtype", None), "m"):\n # Must be an np.ndarray; GH#22390\n return super()._arith_method(other, op)\n\n if op in [\n operator.pow,\n ops.rpow,\n operator.mod,\n ops.rmod,\n operator.floordiv,\n ops.rfloordiv,\n divmod,\n ops.rdivmod,\n ]:\n return super()._arith_method(other, op)\n\n step: Callable | None = None\n if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:\n step = op\n\n # TODO: if other is a RangeIndex we may have more efficient options\n right = extract_array(other, extract_numpy=True, extract_range=True)\n left = self\n\n try:\n # apply if we have an override\n if step:\n with np.errstate(all="ignore"):\n rstep = step(left.step, right)\n\n # we don't have a representable op\n # so return a base index\n if not is_integer(rstep) or not rstep:\n raise ValueError\n\n # GH#53255\n else:\n rstep = -left.step if op == ops.rsub else left.step\n\n with np.errstate(all="ignore"):\n rstart = op(left.start, right)\n rstop = op(left.stop, right)\n\n res_name = ops.get_op_result_name(self, other)\n result = type(self)(rstart, rstop, rstep, name=res_name)\n\n # for compat with numpy / Index with int64 dtype\n # even if we can represent as a 
RangeIndex, return\n # as a float64 Index if we have float-like descriptors\n if not all(is_integer(x) for x in [rstart, rstop, rstep]):\n result = result.astype("float64")\n\n return result\n\n except (ValueError, TypeError, ZeroDivisionError):\n # test_arithmetic_explicit_conversions\n return super()._arith_method(other, op)\n\n # error: Return type "Index" of "take" incompatible with return type\n # "RangeIndex" in supertype "Index"\n def take( # type: ignore[override]\n self,\n indices,\n axis: Axis = 0,\n allow_fill: bool = True,\n fill_value=None,\n **kwargs,\n ) -> Index:\n if kwargs:\n nv.validate_take((), kwargs)\n if is_scalar(indices):\n raise TypeError("Expected indices to be array-like")\n indices = ensure_platform_int(indices)\n\n # raise an exception if allow_fill is True and fill_value is not None\n self._maybe_disallow_fill(allow_fill, fill_value, indices)\n\n if len(indices) == 0:\n taken = np.array([], dtype=self.dtype)\n else:\n ind_max = indices.max()\n if ind_max >= len(self):\n raise IndexError(\n f"index {ind_max} is out of bounds for axis 0 with size {len(self)}"\n )\n ind_min = indices.min()\n if ind_min < -len(self):\n raise IndexError(\n f"index {ind_min} is out of bounds for axis 0 with size {len(self)}"\n )\n taken = indices.astype(self.dtype, casting="safe")\n if ind_min < 0:\n taken %= len(self)\n if self.step != 1:\n taken *= self.step\n if self.start != 0:\n taken += self.start\n\n # _constructor so RangeIndex-> Index with an int64 dtype\n return self._constructor._simple_new(taken, name=self.name)\n | .venv\Lib\site-packages\pandas\core\indexes\range.py | range.py | Python | 39,511 | 0.95 | 0.184499 | 0.1 | vue-tools | 451 | 2024-03-05T22:43:21.538898 | GPL-3.0 | false | 3f99377216006856c70e0aeced85f733 |
""" implement the TimedeltaIndex """\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\nimport warnings\n\nfrom pandas._libs import (\n index as libindex,\n lib,\n)\nfrom pandas._libs.tslibs import (\n Resolution,\n Timedelta,\n to_offset,\n)\nfrom pandas._libs.tslibs.timedeltas import disallow_ambiguous_unit\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_scalar,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\n\nfrom pandas.core.arrays.timedeltas import TimedeltaArray\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import (\n Index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin\nfrom pandas.core.indexes.extension import inherit_names\n\nif TYPE_CHECKING:\n from pandas._typing import DtypeObj\n\n\n@inherit_names(\n ["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"]\n + TimedeltaArray._field_ops,\n TimedeltaArray,\n wrap=True,\n)\n@inherit_names(\n [\n "components",\n "to_pytimedelta",\n "sum",\n "std",\n "median",\n ],\n TimedeltaArray,\n)\nclass TimedeltaIndex(DatetimeTimedeltaMixin):\n """\n Immutable Index of timedelta64 data.\n\n Represented internally as int64, and scalars returned Timedelta objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional timedelta-like data to construct index with.\n unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional\n The unit of ``data``.\n\n .. deprecated:: 2.2.0\n Use ``pd.to_timedelta`` instead.\n\n freq : str or pandas offset object, optional\n One of pandas date offset strings or corresponding objects. 
The string\n ``'infer'`` can be passed in order to set the frequency of the index as\n the inferred frequency upon creation.\n dtype : numpy.dtype or str, default None\n Valid ``numpy`` dtypes are ``timedelta64[ns]``, ``timedelta64[us]``,\n ``timedelta64[ms]``, and ``timedelta64[s]``.\n copy : bool\n Make a copy of input array.\n name : object\n Name to be stored in the index.\n\n Attributes\n ----------\n days\n seconds\n microseconds\n nanoseconds\n components\n inferred_freq\n\n Methods\n -------\n to_pytimedelta\n to_series\n round\n floor\n ceil\n to_frame\n mean\n\n See Also\n --------\n Index : The base pandas Index type.\n Timedelta : Represents a duration between two dates or times.\n DatetimeIndex : Index of datetime64 data.\n PeriodIndex : Index of Period data.\n timedelta_range : Create a fixed-frequency TimedeltaIndex.\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'])\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n\n We can also let pandas infer the frequency when possible.\n\n >>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq='infer')\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq='D')\n """\n\n _typ = "timedeltaindex"\n\n _data_cls = TimedeltaArray\n\n @property\n def _engine_type(self) -> type[libindex.TimedeltaEngine]:\n return libindex.TimedeltaEngine\n\n _data: TimedeltaArray\n\n # Use base class method instead of DatetimeTimedeltaMixin._get_string_slice\n _get_string_slice = Index._get_string_slice\n\n # error: Signature of "_resolution_obj" incompatible with supertype\n # "DatetimeIndexOpsMixin"\n @property\n def _resolution_obj(self) -> Resolution | None: # type: ignore[override]\n return 
self._data._resolution_obj\n\n # -------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data=None,\n unit=lib.no_default,\n freq=lib.no_default,\n closed=lib.no_default,\n dtype=None,\n copy: bool = False,\n name=None,\n ):\n if closed is not lib.no_default:\n # GH#52628\n warnings.warn(\n f"The 'closed' keyword in {cls.__name__} construction is "\n "deprecated and will be removed in a future version.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if unit is not lib.no_default:\n # GH#55499\n warnings.warn(\n f"The 'unit' keyword in {cls.__name__} construction is "\n "deprecated and will be removed in a future version. "\n "Use pd.to_timedelta instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n else:\n unit = None\n\n name = maybe_extract_name(name, data, cls)\n\n if is_scalar(data):\n cls._raise_scalar_data_error(data)\n\n disallow_ambiguous_unit(unit)\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n if (\n isinstance(data, TimedeltaArray)\n and freq is lib.no_default\n and (dtype is None or dtype == data.dtype)\n ):\n if copy:\n data = data.copy()\n return cls._simple_new(data, name=name)\n\n if (\n isinstance(data, TimedeltaIndex)\n and freq is lib.no_default\n and name is None\n and (dtype is None or dtype == data.dtype)\n ):\n if copy:\n return data.copy()\n else:\n return data._view()\n\n # - Cases checked above all return/raise before reaching here - #\n\n tdarr = TimedeltaArray._from_sequence_not_strict(\n data, freq=freq, unit=unit, dtype=dtype, copy=copy\n )\n refs = None\n if not copy and isinstance(data, (ABCSeries, Index)):\n refs = data._references\n\n return cls._simple_new(tdarr, name=name, refs=refs)\n\n # -------------------------------------------------------------------\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n """\n Can we compare values of the given dtype to our own?\n """\n return lib.is_np_dtype(dtype, "m") # aka 
self._data._is_recognized_dtype\n\n # -------------------------------------------------------------------\n # Indexing Methods\n\n def get_loc(self, key):\n """\n Get integer location for requested label\n\n Returns\n -------\n loc : int, slice, or ndarray[int]\n """\n self._check_indexing_error(key)\n\n try:\n key = self._data._validate_scalar(key, unbox=False)\n except TypeError as err:\n raise KeyError(key) from err\n\n return Index.get_loc(self, key)\n\n def _parse_with_reso(self, label: str):\n # the "with_reso" is a no-op for TimedeltaIndex\n parsed = Timedelta(label)\n return parsed, None\n\n def _parsed_string_to_bounds(self, reso, parsed: Timedelta):\n # reso is unused, included to match signature of DTI/PI\n lbound = parsed.round(parsed.resolution_string)\n rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")\n return lbound, rbound\n\n # -------------------------------------------------------------------\n\n @property\n def inferred_type(self) -> str:\n return "timedelta64"\n\n\ndef timedelta_range(\n start=None,\n end=None,\n periods: int | None = None,\n freq=None,\n name=None,\n closed=None,\n *,\n unit: str | None = None,\n) -> TimedeltaIndex:\n """\n Return a fixed frequency TimedeltaIndex with day as the default.\n\n Parameters\n ----------\n start : str or timedelta-like, default None\n Left bound for generating timedeltas.\n end : str or timedelta-like, default None\n Right bound for generating timedeltas.\n periods : int, default None\n Number of periods to generate.\n freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5h'.\n name : str, default None\n Name of the resulting TimedeltaIndex.\n closed : str, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None).\n unit : str, default None\n Specify the desired resolution of the result.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n TimedeltaIndex\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end`` (closed on both sides).\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.timedelta_range(start='1 day', periods=4)\n TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq='D')\n\n The ``closed`` parameter specifies which endpoint is included. The default\n behavior is to include both endpoints.\n\n >>> pd.timedelta_range(start='1 day', periods=4, closed='right')\n TimedeltaIndex(['2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq='D')\n\n The ``freq`` parameter specifies the frequency of the TimedeltaIndex.\n Only fixed frequencies can be passed, non-fixed frequencies such as\n 'M' (month end) will raise.\n\n >>> pd.timedelta_range(start='1 day', end='2 days', freq='6h')\n TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',\n '1 days 18:00:00', '2 days 00:00:00'],\n dtype='timedelta64[ns]', freq='6h')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)\n TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',\n '5 days 00:00:00'],\n dtype='timedelta64[ns]', freq=None)\n\n **Specify a unit**\n\n >>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s")\n TimedeltaIndex(['1 days', '100001 days', '200001 days'],\n dtype='timedelta64[s]', freq='100000D')\n """\n if freq is None and com.any_none(periods, start, end):\n freq = "D"\n\n freq = to_offset(freq)\n tdarr = 
TimedeltaArray._generate_range(\n start, end, periods, freq, closed=closed, unit=unit\n )\n return TimedeltaIndex._simple_new(tdarr, name=name)\n | .venv\Lib\site-packages\pandas\core\indexes\timedeltas.py | timedeltas.py | Python | 10,960 | 0.95 | 0.075843 | 0.054608 | vue-tools | 713 | 2025-01-13T06:27:31.715890 | GPL-3.0 | false | fd0749ac2223e11c4c13ebfeff3f13a8 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\accessors.cpython-313.pyc | accessors.cpython-313.pyc | Other | 22,074 | 0.8 | 0.028302 | 0 | python-kit | 161 | 2024-02-28T19:31:20.933958 | BSD-3-Clause | false | 4a1fd67fa9fb7d2c78a38bdb4c2ade7b |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\api.cpython-313.pyc | api.cpython-313.pyc | Other | 12,860 | 0.8 | 0.033019 | 0 | awesome-app | 189 | 2024-06-15T11:25:07.781883 | BSD-3-Clause | false | 9683e9786d5e7d155357108d71e915b9 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\category.cpython-313.pyc | category.cpython-313.pyc | Other | 17,062 | 0.95 | 0.081699 | 0.003788 | awesome-app | 105 | 2025-03-11T05:01:21.718735 | MIT | false | dbdd1cc037a652d80d03b30be5755137 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\datetimelike.cpython-313.pyc | datetimelike.cpython-313.pyc | Other | 33,197 | 0.95 | 0.032573 | 0.003571 | awesome-app | 685 | 2024-03-27T23:42:11.486647 | GPL-3.0 | false | fd26697dcac40267243cc48762f2a574 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\datetimes.cpython-313.pyc | datetimes.cpython-313.pyc | Other | 40,430 | 0.95 | 0.041481 | 0.015177 | python-kit | 850 | 2024-08-31T19:51:46.701361 | MIT | false | 3a54a69e524d07adeefc88c6e2681cf8 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\extension.cpython-313.pyc | extension.cpython-313.pyc | Other | 7,151 | 0.95 | 0.093333 | 0 | react-lib | 31 | 2025-06-25T15:44:23.790771 | MIT | false | e5d0ec08780efead474ed0e41c26ea2a |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\frozen.cpython-313.pyc | frozen.cpython-313.pyc | Other | 4,952 | 0.95 | 0.046154 | 0 | vue-tools | 243 | 2025-03-18T03:55:00.848104 | Apache-2.0 | false | eae88201ae8b27a21466d65ed26e0ed8 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\interval.cpython-313.pyc | interval.cpython-313.pyc | Other | 41,430 | 0.95 | 0.061914 | 0.006356 | awesome-app | 591 | 2023-11-13T02:43:49.276267 | MIT | false | ad3fd9b23f3427616bf5a055eecce9ca |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\period.cpython-313.pyc | period.cpython-313.pyc | Other | 21,731 | 0.8 | 0.028754 | 0.007092 | react-lib | 121 | 2024-03-31T14:08:41.345906 | BSD-3-Clause | false | 825aab3ce1f174c18c23ab5876023059 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\range.cpython-313.pyc | range.cpython-313.pyc | Other | 46,402 | 0.95 | 0.049724 | 0 | awesome-app | 209 | 2025-03-03T05:59:57.953650 | BSD-3-Clause | false | 99426d1ade7ac2e0f84fce91712393c2 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\timedeltas.cpython-313.pyc | timedeltas.cpython-313.pyc | Other | 11,668 | 0.8 | 0.014019 | 0.005376 | react-lib | 294 | 2024-07-06T04:36:44.296107 | Apache-2.0 | false | 4a53918f701a68134e4636d205f06764 |
\n\n | .venv\Lib\site-packages\pandas\core\indexes\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 194 | 0.7 | 0 | 0 | react-lib | 385 | 2024-08-28T23:47:09.043924 | MIT | false | 372c73679e689629a522ac8f7e3f81fe |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nfrom pandas.core.interchange.dataframe_protocol import (\n Buffer,\n DlpackDeviceType,\n)\n\nif TYPE_CHECKING:\n import numpy as np\n import pyarrow as pa\n\n\nclass PandasBuffer(Buffer):\n """\n Data in the buffer is guaranteed to be contiguous in memory.\n """\n\n def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None:\n """\n Handle only regular columns (= numpy arrays) for now.\n """\n if x.strides[0] and not x.strides == (x.dtype.itemsize,):\n # The protocol does not support strided buffers, so a copy is\n # necessary. If that's not allowed, we need to raise an exception.\n if allow_copy:\n x = x.copy()\n else:\n raise RuntimeError(\n "Exports cannot be zero-copy in the case "\n "of a non-contiguous buffer"\n )\n\n # Store the numpy array in which the data resides as a private\n # attribute, so we can use it to retrieve the public attributes\n self._x = x\n\n @property\n def bufsize(self) -> int:\n """\n Buffer size in bytes.\n """\n return self._x.size * self._x.dtype.itemsize\n\n @property\n def ptr(self) -> int:\n """\n Pointer to start of the buffer as an integer.\n """\n return self._x.__array_interface__["data"][0]\n\n def __dlpack__(self) -> Any:\n """\n Represent this structure as DLPack interface.\n """\n return self._x.__dlpack__()\n\n def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n """\n Device type and device ID for where the data in the buffer resides.\n """\n return (DlpackDeviceType.CPU, None)\n\n def __repr__(self) -> str:\n return (\n "PandasBuffer("\n + str(\n {\n "bufsize": self.bufsize,\n "ptr": self.ptr,\n "device": self.__dlpack_device__()[0].name,\n }\n )\n + ")"\n )\n\n\nclass PandasBufferPyarrow(Buffer):\n """\n Data in the buffer is guaranteed to be contiguous in memory.\n """\n\n def __init__(\n self,\n buffer: pa.Buffer,\n *,\n length: int,\n ) -> None:\n """\n Handle pyarrow chunked arrays.\n """\n self._buffer 
= buffer\n self._length = length\n\n @property\n def bufsize(self) -> int:\n """\n Buffer size in bytes.\n """\n return self._buffer.size\n\n @property\n def ptr(self) -> int:\n """\n Pointer to start of the buffer as an integer.\n """\n return self._buffer.address\n\n def __dlpack__(self) -> Any:\n """\n Represent this structure as DLPack interface.\n """\n raise NotImplementedError()\n\n def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n """\n Device type and device ID for where the data in the buffer resides.\n """\n return (DlpackDeviceType.CPU, None)\n\n def __repr__(self) -> str:\n return (\n "PandasBuffer[pyarrow]("\n + str(\n {\n "bufsize": self.bufsize,\n "ptr": self.ptr,\n "device": "CPU",\n }\n )\n + ")"\n )\n | .venv\Lib\site-packages\pandas\core\interchange\buffer.py | buffer.py | Python | 3,453 | 0.95 | 0.147059 | 0.043103 | react-lib | 241 | 2025-06-05T18:10:00.108330 | Apache-2.0 | false | 93a903c969bae7cd1bf9ffd28a07631d |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nimport numpy as np\n\nfrom pandas._libs.lib import infer_dtype\nfrom pandas._libs.tslibs import iNaT\nfrom pandas.errors import NoBufferPresent\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.dtypes import BaseMaskedDtype\n\nimport pandas as pd\nfrom pandas import (\n ArrowDtype,\n DatetimeTZDtype,\n)\nfrom pandas.api.types import is_string_dtype\nfrom pandas.core.interchange.buffer import (\n PandasBuffer,\n PandasBufferPyarrow,\n)\nfrom pandas.core.interchange.dataframe_protocol import (\n Column,\n ColumnBuffers,\n ColumnNullType,\n DtypeKind,\n)\nfrom pandas.core.interchange.utils import (\n ArrowCTypes,\n Endianness,\n dtype_to_arrow_c_fmt,\n)\n\nif TYPE_CHECKING:\n from pandas.core.interchange.dataframe_protocol import Buffer\n\n_NP_KINDS = {\n "i": DtypeKind.INT,\n "u": DtypeKind.UINT,\n "f": DtypeKind.FLOAT,\n "b": DtypeKind.BOOL,\n "U": DtypeKind.STRING,\n "M": DtypeKind.DATETIME,\n "m": DtypeKind.DATETIME,\n}\n\n_NULL_DESCRIPTION = {\n DtypeKind.FLOAT: (ColumnNullType.USE_NAN, None),\n DtypeKind.DATETIME: (ColumnNullType.USE_SENTINEL, iNaT),\n DtypeKind.INT: (ColumnNullType.NON_NULLABLE, None),\n DtypeKind.UINT: (ColumnNullType.NON_NULLABLE, None),\n DtypeKind.BOOL: (ColumnNullType.NON_NULLABLE, None),\n # Null values for categoricals are stored as `-1` sentinel values\n # in the category date (e.g., `col.values.codes` is int8 np.ndarray)\n DtypeKind.CATEGORICAL: (ColumnNullType.USE_SENTINEL, -1),\n # follow Arrow in using 1 as valid value and 0 for missing/null value\n DtypeKind.STRING: (ColumnNullType.USE_BYTEMASK, 0),\n}\n\n_NO_VALIDITY_BUFFER = {\n ColumnNullType.NON_NULLABLE: "This column is non-nullable",\n ColumnNullType.USE_NAN: "This column uses NaN as null",\n ColumnNullType.USE_SENTINEL: "This column uses a sentinel value",\n}\n\n\nclass PandasColumn(Column):\n """\n A column object, with only the methods and properties 
required by the\n interchange protocol defined.\n A column can contain one or more chunks. Each chunk can contain up to three\n buffers - a data buffer, a mask buffer (depending on null representation),\n and an offsets buffer (if variable-size binary; e.g., variable-length\n strings).\n Note: this Column object can only be produced by ``__dataframe__``, so\n doesn't need its own version or ``__column__`` protocol.\n """\n\n def __init__(self, column: pd.Series, allow_copy: bool = True) -> None:\n """\n Note: doesn't deal with extension arrays yet, just assume a regular\n Series/ndarray for now.\n """\n if isinstance(column, pd.DataFrame):\n raise TypeError(\n "Expected a Series, got a DataFrame. This likely happened "\n "because you called __dataframe__ on a DataFrame which, "\n "after converting column names to string, resulted in duplicated "\n f"names: {column.columns}. Please rename these columns before "\n "using the interchange protocol."\n )\n if not isinstance(column, pd.Series):\n raise NotImplementedError(f"Columns of type {type(column)} not handled yet")\n\n # Store the column as a private attribute\n self._col = column\n self._allow_copy = allow_copy\n\n def size(self) -> int:\n """\n Size of the column, in elements.\n """\n return self._col.size\n\n @property\n def offset(self) -> int:\n """\n Offset of first element. 
Always zero.\n """\n # TODO: chunks are implemented now, probably this should return something\n return 0\n\n @cache_readonly\n def dtype(self) -> tuple[DtypeKind, int, str, str]:\n dtype = self._col.dtype\n\n if isinstance(dtype, pd.CategoricalDtype):\n codes = self._col.values.codes\n (\n _,\n bitwidth,\n c_arrow_dtype_f_str,\n _,\n ) = self._dtype_from_pandasdtype(codes.dtype)\n return (\n DtypeKind.CATEGORICAL,\n bitwidth,\n c_arrow_dtype_f_str,\n Endianness.NATIVE,\n )\n elif is_string_dtype(dtype):\n if infer_dtype(self._col) in ("string", "empty"):\n return (\n DtypeKind.STRING,\n 8,\n dtype_to_arrow_c_fmt(dtype),\n Endianness.NATIVE,\n )\n raise NotImplementedError("Non-string object dtypes are not supported yet")\n else:\n return self._dtype_from_pandasdtype(dtype)\n\n def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]:\n """\n See `self.dtype` for details.\n """\n # Note: 'c' (complex) not handled yet (not in array spec v1).\n # 'b', 'B' (bytes), 'S', 'a', (old-style string) 'V' (void) not handled\n # datetime and timedelta both map to datetime (is timedelta handled?)\n\n kind = _NP_KINDS.get(dtype.kind, None)\n if kind is None:\n # Not a NumPy dtype. 
Check if it's a categorical maybe\n raise ValueError(f"Data type {dtype} not supported by interchange protocol")\n if isinstance(dtype, ArrowDtype):\n byteorder = dtype.numpy_dtype.byteorder\n elif isinstance(dtype, DatetimeTZDtype):\n byteorder = dtype.base.byteorder # type: ignore[union-attr]\n elif isinstance(dtype, BaseMaskedDtype):\n byteorder = dtype.numpy_dtype.byteorder\n else:\n byteorder = dtype.byteorder\n\n if dtype == "bool[pyarrow]":\n # return early to avoid the `* 8` below, as this is a bitmask\n # rather than a bytemask\n return (\n kind,\n dtype.itemsize, # pyright: ignore[reportGeneralTypeIssues]\n ArrowCTypes.BOOL,\n byteorder,\n )\n\n return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder\n\n @property\n def describe_categorical(self):\n """\n If the dtype is categorical, there are two options:\n - There are only values in the data buffer.\n - There is a separate non-categorical Column encoding for categorical values.\n\n Raises TypeError if the dtype is not categorical\n\n Content of returned dict:\n - "is_ordered" : bool, whether the ordering of dictionary indices is\n semantically meaningful.\n - "is_dictionary" : bool, whether a dictionary-style mapping of\n categorical values to other objects exists\n - "categories" : Column representing the (implicit) mapping of indices to\n category values (e.g. 
an array of cat1, cat2, ...).\n None if not a dictionary-style categorical.\n """\n if not self.dtype[0] == DtypeKind.CATEGORICAL:\n raise TypeError(\n "describe_categorical only works on a column with categorical dtype!"\n )\n\n return {\n "is_ordered": self._col.cat.ordered,\n "is_dictionary": True,\n "categories": PandasColumn(pd.Series(self._col.cat.categories)),\n }\n\n @property\n def describe_null(self):\n if isinstance(self._col.dtype, BaseMaskedDtype):\n column_null_dtype = ColumnNullType.USE_BYTEMASK\n null_value = 1\n return column_null_dtype, null_value\n if isinstance(self._col.dtype, ArrowDtype):\n # We already rechunk (if necessary / allowed) upon initialization, so this\n # is already single-chunk by the time we get here.\n if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined]\n return ColumnNullType.NON_NULLABLE, None\n return ColumnNullType.USE_BITMASK, 0\n kind = self.dtype[0]\n try:\n null, value = _NULL_DESCRIPTION[kind]\n except KeyError:\n raise NotImplementedError(f"Data type {kind} not yet supported")\n\n return null, value\n\n @cache_readonly\n def null_count(self) -> int:\n """\n Number of null elements. 
Should always be known.\n """\n return self._col.isna().sum().item()\n\n @property\n def metadata(self) -> dict[str, pd.Index]:\n """\n Store specific metadata of the column.\n """\n return {"pandas.index": self._col.index}\n\n def num_chunks(self) -> int:\n """\n Return the number of chunks the column consists of.\n """\n return 1\n\n def get_chunks(self, n_chunks: int | None = None):\n """\n Return an iterator yielding the chunks.\n See `DataFrame.get_chunks` for details on ``n_chunks``.\n """\n if n_chunks and n_chunks > 1:\n size = len(self._col)\n step = size // n_chunks\n if size % n_chunks != 0:\n step += 1\n for start in range(0, step * n_chunks, step):\n yield PandasColumn(\n self._col.iloc[start : start + step], self._allow_copy\n )\n else:\n yield self\n\n def get_buffers(self) -> ColumnBuffers:\n """\n Return a dictionary containing the underlying buffers.\n The returned dictionary has the following contents:\n - "data": a two-element tuple whose first element is a buffer\n containing the data and whose second element is the data\n buffer's associated dtype.\n - "validity": a two-element tuple whose first element is a buffer\n containing mask values indicating missing data and\n whose second element is the mask value buffer's\n associated dtype. None if the null representation is\n not a bit or byte mask.\n - "offsets": a two-element tuple whose first element is a buffer\n containing the offset values for variable-size binary\n data (e.g., variable-length strings) and whose second\n element is the offsets buffer's associated dtype. 
None\n if the data buffer does not have an associated offsets\n buffer.\n """\n buffers: ColumnBuffers = {\n "data": self._get_data_buffer(),\n "validity": None,\n "offsets": None,\n }\n\n try:\n buffers["validity"] = self._get_validity_buffer()\n except NoBufferPresent:\n pass\n\n try:\n buffers["offsets"] = self._get_offsets_buffer()\n except NoBufferPresent:\n pass\n\n return buffers\n\n def _get_data_buffer(\n self,\n ) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]:\n """\n Return the buffer containing the data and the buffer's associated dtype.\n """\n buffer: Buffer\n if self.dtype[0] in (\n DtypeKind.INT,\n DtypeKind.UINT,\n DtypeKind.FLOAT,\n DtypeKind.BOOL,\n DtypeKind.DATETIME,\n ):\n # self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make\n # it longer than 4 characters\n dtype = self.dtype\n if self.dtype[0] == DtypeKind.DATETIME and len(self.dtype[2]) > 4:\n np_arr = self._col.dt.tz_convert(None).to_numpy()\n else:\n arr = self._col.array\n if isinstance(self._col.dtype, BaseMaskedDtype):\n np_arr = arr._data # type: ignore[attr-defined]\n elif isinstance(self._col.dtype, ArrowDtype):\n # We already rechunk (if necessary / allowed) upon initialization,\n # so this is already single-chunk by the time we get here.\n arr = arr._pa_array.chunks[0] # type: ignore[attr-defined]\n buffer = PandasBufferPyarrow(\n arr.buffers()[1], # type: ignore[attr-defined]\n length=len(arr),\n )\n return buffer, dtype\n else:\n np_arr = arr._ndarray # type: ignore[attr-defined]\n buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy)\n elif self.dtype[0] == DtypeKind.CATEGORICAL:\n codes = self._col.values._codes\n buffer = PandasBuffer(codes, allow_copy=self._allow_copy)\n dtype = self._dtype_from_pandasdtype(codes.dtype)\n elif self.dtype[0] == DtypeKind.STRING:\n # Marshal the strings from a NumPy object array into a byte array\n buf = self._col.to_numpy()\n b = bytearray()\n\n # TODO: this for-loop is slow; can be implemented in Cython/C/C++ later\n 
for obj in buf:\n if isinstance(obj, str):\n b.extend(obj.encode(encoding="utf-8"))\n\n # Convert the byte array to a Pandas "buffer" using\n # a NumPy array as the backing store\n buffer = PandasBuffer(np.frombuffer(b, dtype="uint8"))\n\n # Define the dtype for the returned buffer\n # TODO: this will need correcting\n # https://github.com/pandas-dev/pandas/issues/54781\n dtype = self.dtype\n else:\n raise NotImplementedError(f"Data type {self._col.dtype} not handled yet")\n\n return buffer, dtype\n\n def _get_validity_buffer(self) -> tuple[Buffer, Any] | None:\n """\n Return the buffer containing the mask values indicating missing data and\n the buffer's associated dtype.\n Raises NoBufferPresent if null representation is not a bit or byte mask.\n """\n null, invalid = self.describe_null\n buffer: Buffer\n if isinstance(self._col.dtype, ArrowDtype):\n # We already rechunk (if necessary / allowed) upon initialization, so this\n # is already single-chunk by the time we get here.\n arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined]\n dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE)\n if arr.buffers()[0] is None:\n return None\n buffer = PandasBufferPyarrow(\n arr.buffers()[0],\n length=len(arr),\n )\n return buffer, dtype\n\n if isinstance(self._col.dtype, BaseMaskedDtype):\n mask = self._col.array._mask # type: ignore[attr-defined]\n buffer = PandasBuffer(mask)\n dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)\n return buffer, dtype\n\n if self.dtype[0] == DtypeKind.STRING:\n # For now, use byte array as the mask.\n # TODO: maybe store as bit array to save space?..\n buf = self._col.to_numpy()\n\n # Determine the encoding for valid values\n valid = invalid == 0\n invalid = not valid\n\n mask = np.zeros(shape=(len(buf),), dtype=np.bool_)\n for i, obj in enumerate(buf):\n mask[i] = valid if isinstance(obj, str) else invalid\n\n # Convert the mask array to a Pandas "buffer" using\n # a NumPy array as the backing 
store\n buffer = PandasBuffer(mask)\n\n # Define the dtype of the returned buffer\n dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)\n\n return buffer, dtype\n\n try:\n msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask"\n except KeyError:\n # TODO: implement for other bit/byte masks?\n raise NotImplementedError("See self.describe_null")\n\n raise NoBufferPresent(msg)\n\n def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]:\n """\n Return the buffer containing the offset values for variable-size binary\n data (e.g., variable-length strings) and the buffer's associated dtype.\n Raises NoBufferPresent if the data buffer does not have an associated\n offsets buffer.\n """\n if self.dtype[0] == DtypeKind.STRING:\n # For each string, we need to manually determine the next offset\n values = self._col.to_numpy()\n ptr = 0\n offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)\n for i, v in enumerate(values):\n # For missing values (in this case, `np.nan` values)\n # we don't increment the pointer\n if isinstance(v, str):\n b = v.encode(encoding="utf-8")\n ptr += len(b)\n\n offsets[i + 1] = ptr\n\n # Convert the offsets to a Pandas "buffer" using\n # the NumPy array as the backing store\n buffer = PandasBuffer(offsets)\n\n # Assemble the buffer dtype info\n dtype = (\n DtypeKind.INT,\n 64,\n ArrowCTypes.INT64,\n Endianness.NATIVE,\n ) # note: currently only support native endianness\n else:\n raise NoBufferPresent(\n "This column has a fixed-length dtype so "\n "it does not have an offsets buffer"\n )\n\n return buffer, dtype\n | .venv\Lib\site-packages\pandas\core\interchange\column.py | column.py | Python | 17,547 | 0.95 | 0.156182 | 0.096059 | react-lib | 533 | 2024-12-07T14:20:08.969053 | MIT | false | 945093146f00a218b4e7d32855a7a8bb |
from __future__ import annotations\n\nfrom collections import abc\nfrom typing import TYPE_CHECKING\n\nfrom pandas.core.interchange.column import PandasColumn\nfrom pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg\nfrom pandas.core.interchange.utils import maybe_rechunk\n\nif TYPE_CHECKING:\n from collections.abc import (\n Iterable,\n Sequence,\n )\n\n from pandas import (\n DataFrame,\n Index,\n )\n\n\nclass PandasDataFrameXchg(DataFrameXchg):\n """\n A data frame class, with only the methods required by the interchange\n protocol defined.\n Instances of this (private) class are returned from\n ``pd.DataFrame.__dataframe__`` as objects with the methods and\n attributes defined on this class.\n """\n\n def __init__(self, df: DataFrame, allow_copy: bool = True) -> None:\n """\n Constructor - an instance of this (private) class is returned from\n `pd.DataFrame.__dataframe__`.\n """\n self._df = df.rename(columns=str, copy=False)\n self._allow_copy = allow_copy\n for i, _col in enumerate(self._df.columns):\n rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy)\n if rechunked is not None:\n self._df.isetitem(i, rechunked)\n\n def __dataframe__(\n self, nan_as_null: bool = False, allow_copy: bool = True\n ) -> PandasDataFrameXchg:\n # `nan_as_null` can be removed here once it's removed from\n # Dataframe.__dataframe__\n return PandasDataFrameXchg(self._df, allow_copy)\n\n @property\n def metadata(self) -> dict[str, Index]:\n # `index` isn't a regular column, and the protocol doesn't support row\n # labels - so we export it as Pandas-specific metadata here.\n return {"pandas.index": self._df.index}\n\n def num_columns(self) -> int:\n return len(self._df.columns)\n\n def num_rows(self) -> int:\n return len(self._df)\n\n def num_chunks(self) -> int:\n return 1\n\n def column_names(self) -> Index:\n return self._df.columns\n\n def get_column(self, i: int) -> PandasColumn:\n return PandasColumn(self._df.iloc[:, i], 
allow_copy=self._allow_copy)\n\n def get_column_by_name(self, name: str) -> PandasColumn:\n return PandasColumn(self._df[name], allow_copy=self._allow_copy)\n\n def get_columns(self) -> list[PandasColumn]:\n return [\n PandasColumn(self._df[name], allow_copy=self._allow_copy)\n for name in self._df.columns\n ]\n\n def select_columns(self, indices: Sequence[int]) -> PandasDataFrameXchg:\n if not isinstance(indices, abc.Sequence):\n raise ValueError("`indices` is not a sequence")\n if not isinstance(indices, list):\n indices = list(indices)\n\n return PandasDataFrameXchg(\n self._df.iloc[:, indices], allow_copy=self._allow_copy\n )\n\n def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: # type: ignore[override]\n if not isinstance(names, abc.Sequence):\n raise ValueError("`names` is not a sequence")\n if not isinstance(names, list):\n names = list(names)\n\n return PandasDataFrameXchg(self._df.loc[:, names], allow_copy=self._allow_copy)\n\n def get_chunks(self, n_chunks: int | None = None) -> Iterable[PandasDataFrameXchg]:\n """\n Return an iterator yielding the chunks.\n """\n if n_chunks and n_chunks > 1:\n size = len(self._df)\n step = size // n_chunks\n if size % n_chunks != 0:\n step += 1\n for start in range(0, step * n_chunks, step):\n yield PandasDataFrameXchg(\n self._df.iloc[start : start + step, :],\n allow_copy=self._allow_copy,\n )\n else:\n yield self\n | .venv\Lib\site-packages\pandas\core\interchange\dataframe.py | dataframe.py | Python | 3,879 | 0.95 | 0.256637 | 0.043478 | react-lib | 10 | 2025-05-21T20:18:07.822454 | MIT | false | dbf729b300104affc5ee9d5480a6f235 |
"""\nA verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api\n"""\n\nfrom __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nimport enum\nfrom typing import (\n TYPE_CHECKING,\n Any,\n TypedDict,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Iterable,\n Sequence,\n )\n\n\nclass DlpackDeviceType(enum.IntEnum):\n """Integer enum for device type codes matching DLPack."""\n\n CPU = 1\n CUDA = 2\n CPU_PINNED = 3\n OPENCL = 4\n VULKAN = 7\n METAL = 8\n VPI = 9\n ROCM = 10\n\n\nclass DtypeKind(enum.IntEnum):\n """\n Integer enum for data types.\n\n Attributes\n ----------\n INT : int\n Matches to signed integer data type.\n UINT : int\n Matches to unsigned integer data type.\n FLOAT : int\n Matches to floating point data type.\n BOOL : int\n Matches to boolean data type.\n STRING : int\n Matches to string data type (UTF-8 encoded).\n DATETIME : int\n Matches to datetime data type.\n CATEGORICAL : int\n Matches to categorical data type.\n """\n\n INT = 0\n UINT = 1\n FLOAT = 2\n BOOL = 20\n STRING = 21 # UTF-8\n DATETIME = 22\n CATEGORICAL = 23\n\n\nclass ColumnNullType(enum.IntEnum):\n """\n Integer enum for null type representation.\n\n Attributes\n ----------\n NON_NULLABLE : int\n Non-nullable column.\n USE_NAN : int\n Use explicit float NaN value.\n USE_SENTINEL : int\n Sentinel value besides NaN/NaT.\n USE_BITMASK : int\n The bit is set/unset representing a null on a certain position.\n USE_BYTEMASK : int\n The byte is set/unset representing a null on a certain position.\n """\n\n NON_NULLABLE = 0\n USE_NAN = 1\n USE_SENTINEL = 2\n USE_BITMASK = 3\n USE_BYTEMASK = 4\n\n\nclass ColumnBuffers(TypedDict):\n # first element is a buffer containing the column data;\n # second element is the data buffer's associated dtype\n data: tuple[Buffer, Any]\n\n # first element is a buffer containing mask values indicating missing data;\n # second element is the mask value buffer's associated dtype.\n # None if 
the null representation is not a bit or byte mask\n validity: tuple[Buffer, Any] | None\n\n # first element is a buffer containing the offset values for\n # variable-size binary data (e.g., variable-length strings);\n # second element is the offsets buffer's associated dtype.\n # None if the data buffer does not have an associated offsets buffer\n offsets: tuple[Buffer, Any] | None\n\n\nclass CategoricalDescription(TypedDict):\n # whether the ordering of dictionary indices is semantically meaningful\n is_ordered: bool\n # whether a dictionary-style mapping of categorical values to other objects exists\n is_dictionary: bool\n # Python-level only (e.g. ``{int: str}``).\n # None if not a dictionary-style categorical.\n categories: Column | None\n\n\nclass Buffer(ABC):\n """\n Data in the buffer is guaranteed to be contiguous in memory.\n\n Note that there is no dtype attribute present, a buffer can be thought of\n as simply a block of memory. However, if the column that the buffer is\n attached to has a dtype that's supported by DLPack and ``__dlpack__`` is\n implemented, then that dtype information will be contained in the return\n value from ``__dlpack__``.\n\n This distinction is useful to support both data exchange via DLPack on a\n buffer and (b) dtypes like variable-length strings which do not have a\n fixed number of bytes per element.\n """\n\n @property\n @abstractmethod\n def bufsize(self) -> int:\n """\n Buffer size in bytes.\n """\n\n @property\n @abstractmethod\n def ptr(self) -> int:\n """\n Pointer to start of the buffer as an integer.\n """\n\n @abstractmethod\n def __dlpack__(self):\n """\n Produce DLPack capsule (see array API standard).\n\n Raises:\n\n - TypeError : if the buffer contains unsupported dtypes.\n - NotImplementedError : if DLPack support is not implemented\n\n Useful to have to connect to array libraries. 
Support optional because\n it's not completely trivial to implement for a Python-only library.\n """\n raise NotImplementedError("__dlpack__")\n\n @abstractmethod\n def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n """\n Device type and device ID for where the data in the buffer resides.\n Uses device type codes matching DLPack.\n Note: must be implemented even if ``__dlpack__`` is not.\n """\n\n\nclass Column(ABC):\n """\n A column object, with only the methods and properties required by the\n interchange protocol defined.\n\n A column can contain one or more chunks. Each chunk can contain up to three\n buffers - a data buffer, a mask buffer (depending on null representation),\n and an offsets buffer (if variable-size binary; e.g., variable-length\n strings).\n\n TBD: Arrow has a separate "null" dtype, and has no separate mask concept.\n Instead, it seems to use "children" for both columns with a bit mask,\n and for nested dtypes. Unclear whether this is elegant or confusing.\n This design requires checking the null representation explicitly.\n\n The Arrow design requires checking:\n 1. the ARROW_FLAG_NULLABLE (for sentinel values)\n 2. if a column has two children, combined with one of those children\n having a null dtype.\n\n Making the mask concept explicit seems useful. One null dtype would\n not be enough to cover both bit and byte masks, so that would mean\n even more checking if we did it the Arrow way.\n\n TBD: there's also the "chunk" concept here, which is implicit in Arrow as\n multiple buffers per array (= column here). 
Semantically it may make\n sense to have both: chunks were meant for example for lazy evaluation\n of data which doesn't fit in memory, while multiple buffers per column\n could also come from doing a selection operation on a single\n contiguous buffer.\n\n Given these concepts, one would expect chunks to be all of the same\n size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows),\n while multiple buffers could have data-dependent lengths. Not an issue\n in pandas if one column is backed by a single NumPy array, but in\n Arrow it seems possible.\n Are multiple chunks *and* multiple buffers per column necessary for\n the purposes of this interchange protocol, or must producers either\n reuse the chunk concept for this or copy the data?\n\n Note: this Column object can only be produced by ``__dataframe__``, so\n doesn't need its own version or ``__column__`` protocol.\n """\n\n @abstractmethod\n def size(self) -> int:\n """\n Size of the column, in elements.\n\n Corresponds to DataFrame.num_rows() if column is a single chunk;\n equal to size of this current chunk otherwise.\n """\n\n @property\n @abstractmethod\n def offset(self) -> int:\n """\n Offset of first element.\n\n May be > 0 if using chunks; for example for a column with N chunks of\n equal size M (only the last chunk may be shorter),\n ``offset = n * M``, ``n = 0 .. 
N-1``.\n """\n\n @property\n @abstractmethod\n def dtype(self) -> tuple[DtypeKind, int, str, str]:\n """\n Dtype description as a tuple ``(kind, bit-width, format string, endianness)``.\n\n Bit-width : the number of bits as an integer\n Format string : data type description format string in Apache Arrow C\n Data Interface format.\n Endianness : current only native endianness (``=``) is supported\n\n Notes:\n - Kind specifiers are aligned with DLPack where possible (hence the\n jump to 20, leave enough room for future extension)\n - Masks must be specified as boolean with either bit width 1 (for bit\n masks) or 8 (for byte masks).\n - Dtype width in bits was preferred over bytes\n - Endianness isn't too useful, but included now in case in the future\n we need to support non-native endianness\n - Went with Apache Arrow format strings over NumPy format strings\n because they're more complete from a dataframe perspective\n - Format strings are mostly useful for datetime specification, and\n for categoricals.\n - For categoricals, the format string describes the type of the\n categorical in the data buffer. In case of a separate encoding of\n the categorical (e.g. 
an integer to string mapping), this can\n be derived from ``self.describe_categorical``.\n - Data types not included: complex, Arrow-style null, binary, decimal,\n and nested (list, struct, map, union) dtypes.\n """\n\n @property\n @abstractmethod\n def describe_categorical(self) -> CategoricalDescription:\n """\n If the dtype is categorical, there are two options:\n - There are only values in the data buffer.\n - There is a separate non-categorical Column encoding for categorical values.\n\n Raises TypeError if the dtype is not categorical\n\n Returns the dictionary with description on how to interpret the data buffer:\n - "is_ordered" : bool, whether the ordering of dictionary indices is\n semantically meaningful.\n - "is_dictionary" : bool, whether a mapping of\n categorical values to other objects exists\n - "categories" : Column representing the (implicit) mapping of indices to\n category values (e.g. an array of cat1, cat2, ...).\n None if not a dictionary-style categorical.\n\n TBD: are there any other in-memory representations that are needed?\n """\n\n @property\n @abstractmethod\n def describe_null(self) -> tuple[ColumnNullType, Any]:\n """\n Return the missing value (or "null") representation the column dtype\n uses, as a tuple ``(kind, value)``.\n\n Value : if kind is "sentinel value", the actual value. If kind is a bit\n mask or a byte mask, the value (0 or 1) indicating a missing value. None\n otherwise.\n """\n\n @property\n @abstractmethod\n def null_count(self) -> int | None:\n """\n Number of null elements, if known.\n\n Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.\n """\n\n @property\n @abstractmethod\n def metadata(self) -> dict[str, Any]:\n """\n The metadata for the column. 
See `DataFrame.metadata` for more details.\n """\n\n @abstractmethod\n def num_chunks(self) -> int:\n """\n Return the number of chunks the column consists of.\n """\n\n @abstractmethod\n def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:\n """\n Return an iterator yielding the chunks.\n\n See `DataFrame.get_chunks` for details on ``n_chunks``.\n """\n\n @abstractmethod\n def get_buffers(self) -> ColumnBuffers:\n """\n Return a dictionary containing the underlying buffers.\n\n The returned dictionary has the following contents:\n\n - "data": a two-element tuple whose first element is a buffer\n containing the data and whose second element is the data\n buffer's associated dtype.\n - "validity": a two-element tuple whose first element is a buffer\n containing mask values indicating missing data and\n whose second element is the mask value buffer's\n associated dtype. None if the null representation is\n not a bit or byte mask.\n - "offsets": a two-element tuple whose first element is a buffer\n containing the offset values for variable-size binary\n data (e.g., variable-length strings) and whose second\n element is the offsets buffer's associated dtype. 
None\n if the data buffer does not have an associated offsets\n buffer.\n """\n\n\n# def get_children(self) -> Iterable[Column]:\n# """\n# Children columns underneath the column, each object in this iterator\n# must adhere to the column specification.\n# """\n# pass\n\n\nclass DataFrame(ABC):\n """\n A data frame class, with only the methods required by the interchange\n protocol defined.\n\n A "data frame" represents an ordered collection of named columns.\n A column's "name" must be a unique string.\n Columns may be accessed by name or by position.\n\n This could be a public data frame class, or an object with the methods and\n attributes defined on this DataFrame class could be returned from the\n ``__dataframe__`` method of a public data frame class in a library adhering\n to the dataframe interchange protocol specification.\n """\n\n version = 0 # version of the protocol\n\n @abstractmethod\n def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):\n """Construct a new interchange object, potentially changing the parameters."""\n\n @property\n @abstractmethod\n def metadata(self) -> dict[str, Any]:\n """\n The metadata for the data frame, as a dictionary with string keys. The\n contents of `metadata` may be anything, they are meant for a library\n to store information that it needs to, e.g., roundtrip losslessly or\n for two implementations to share data that is not (yet) part of the\n interchange protocol specification. 
For avoiding collisions with other\n entries, please add name the keys with the name of the library\n followed by a period and the desired name, e.g, ``pandas.indexcol``.\n """\n\n @abstractmethod\n def num_columns(self) -> int:\n """\n Return the number of columns in the DataFrame.\n """\n\n @abstractmethod\n def num_rows(self) -> int | None:\n # TODO: not happy with Optional, but need to flag it may be expensive\n # why include it if it may be None - what do we expect consumers\n # to do here?\n """\n Return the number of rows in the DataFrame, if available.\n """\n\n @abstractmethod\n def num_chunks(self) -> int:\n """\n Return the number of chunks the DataFrame consists of.\n """\n\n @abstractmethod\n def column_names(self) -> Iterable[str]:\n """\n Return an iterator yielding the column names.\n """\n\n @abstractmethod\n def get_column(self, i: int) -> Column:\n """\n Return the column at the indicated position.\n """\n\n @abstractmethod\n def get_column_by_name(self, name: str) -> Column:\n """\n Return the column whose name is the indicated name.\n """\n\n @abstractmethod\n def get_columns(self) -> Iterable[Column]:\n """\n Return an iterator yielding the columns.\n """\n\n @abstractmethod\n def select_columns(self, indices: Sequence[int]) -> DataFrame:\n """\n Create a new DataFrame by selecting a subset of columns by index.\n """\n\n @abstractmethod\n def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:\n """\n Create a new DataFrame by selecting a subset of columns by name.\n """\n\n @abstractmethod\n def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:\n """\n Return an iterator yielding the chunks.\n\n By default (None), yields the chunks that the data is stored as by the\n producer. 
If given, ``n_chunks`` must be a multiple of\n ``self.num_chunks()``, meaning the producer must subdivide each chunk\n before yielding it.\n """\n | .venv\Lib\site-packages\pandas\core\interchange\dataframe_protocol.py | dataframe_protocol.py | Python | 16,177 | 0.95 | 0.195699 | 0.057441 | react-lib | 148 | 2023-07-31T13:58:54.890596 | GPL-3.0 | false | 38a1e1061bd9f2b94c747759c3e917bb |
from __future__ import annotations\n\nimport ctypes\nimport re\nfrom typing import Any\n\nimport numpy as np\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import SettingWithCopyError\n\nimport pandas as pd\nfrom pandas.core.interchange.dataframe_protocol import (\n Buffer,\n Column,\n ColumnNullType,\n DataFrame as DataFrameXchg,\n DtypeKind,\n)\nfrom pandas.core.interchange.utils import (\n ArrowCTypes,\n Endianness,\n)\n\n_NP_DTYPES: dict[DtypeKind, dict[int, Any]] = {\n DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64},\n DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64},\n DtypeKind.FLOAT: {32: np.float32, 64: np.float64},\n DtypeKind.BOOL: {1: bool, 8: bool},\n}\n\n\ndef from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame:\n """\n Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.\n\n .. note::\n\n For new development, we highly recommend using the Arrow C Data Interface\n alongside the Arrow PyCapsule Interface instead of the interchange protocol.\n From pandas 2.3 onwards, `from_dataframe` uses the PyCapsule Interface,\n only falling back to the interchange protocol if that fails.\n\n .. warning::\n\n Due to severe implementation issues, we recommend only considering using the\n interchange protocol in the following cases:\n\n - converting to pandas: for pandas >= 2.0.3\n - converting from pandas: for pandas >= 3.0.0\n\n Parameters\n ----------\n df : DataFrameXchg\n Object supporting the interchange protocol, i.e. 
`__dataframe__` method.\n allow_copy : bool, default: True\n Whether to allow copying the memory to perform the conversion\n (if false then zero-copy approach is requested).\n\n Returns\n -------\n pd.DataFrame\n\n Examples\n --------\n >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})\n >>> interchange_object = df_not_necessarily_pandas.__dataframe__()\n >>> interchange_object.column_names()\n Index(['A', 'B'], dtype='object')\n >>> df_pandas = (pd.api.interchange.from_dataframe\n ... (interchange_object.select_columns_by_name(['A'])))\n >>> df_pandas\n A\n 0 1\n 1 2\n\n These methods (``column_names``, ``select_columns_by_name``) should work\n for any dataframe library which implements the interchange protocol.\n """\n if isinstance(df, pd.DataFrame):\n return df\n\n if hasattr(df, "__arrow_c_stream__"):\n try:\n pa = import_optional_dependency("pyarrow", min_version="14.0.0")\n except ImportError:\n # fallback to _from_dataframe\n pass\n else:\n try:\n return pa.table(df).to_pandas(zero_copy_only=not allow_copy)\n except pa.ArrowInvalid as e:\n raise RuntimeError(e) from e\n\n if not hasattr(df, "__dataframe__"):\n raise ValueError("`df` does not support __dataframe__")\n\n return _from_dataframe(\n df.__dataframe__(allow_copy=allow_copy), allow_copy=allow_copy\n )\n\n\ndef _from_dataframe(df: DataFrameXchg, allow_copy: bool = True):\n """\n Build a ``pd.DataFrame`` from the DataFrame interchange object.\n\n Parameters\n ----------\n df : DataFrameXchg\n Object supporting the interchange protocol, i.e. 
`__dataframe__` method.\n allow_copy : bool, default: True\n Whether to allow copying the memory to perform the conversion\n (if false then zero-copy approach is requested).\n\n Returns\n -------\n pd.DataFrame\n """\n pandas_dfs = []\n for chunk in df.get_chunks():\n pandas_df = protocol_df_chunk_to_pandas(chunk)\n pandas_dfs.append(pandas_df)\n\n if not allow_copy and len(pandas_dfs) > 1:\n raise RuntimeError(\n "To join chunks a copy is required which is forbidden by allow_copy=False"\n )\n if not pandas_dfs:\n pandas_df = protocol_df_chunk_to_pandas(df)\n elif len(pandas_dfs) == 1:\n pandas_df = pandas_dfs[0]\n else:\n pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False)\n\n index_obj = df.metadata.get("pandas.index", None)\n if index_obj is not None:\n pandas_df.index = index_obj\n\n return pandas_df\n\n\ndef protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:\n """\n Convert interchange protocol chunk to ``pd.DataFrame``.\n\n Parameters\n ----------\n df : DataFrameXchg\n\n Returns\n -------\n pd.DataFrame\n """\n columns: dict[str, Any] = {}\n buffers = [] # hold on to buffers, keeps memory alive\n for name in df.column_names():\n if not isinstance(name, str):\n raise ValueError(f"Column {name} is not a string")\n if name in columns:\n raise ValueError(f"Column {name} is not unique")\n col = df.get_column_by_name(name)\n dtype = col.dtype[0]\n if dtype in (\n DtypeKind.INT,\n DtypeKind.UINT,\n DtypeKind.FLOAT,\n DtypeKind.BOOL,\n ):\n columns[name], buf = primitive_column_to_ndarray(col)\n elif dtype == DtypeKind.CATEGORICAL:\n columns[name], buf = categorical_column_to_series(col)\n elif dtype == DtypeKind.STRING:\n columns[name], buf = string_column_to_ndarray(col)\n elif dtype == DtypeKind.DATETIME:\n columns[name], buf = datetime_column_to_ndarray(col)\n else:\n raise NotImplementedError(f"Data type {dtype} not handled yet")\n\n buffers.append(buf)\n\n pandas_df = pd.DataFrame(columns)\n 
pandas_df.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"] = buffers\n return pandas_df\n\n\ndef primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:\n """\n Convert a column holding one of the primitive dtypes to a NumPy array.\n\n A primitive type is one of: int, uint, float, bool.\n\n Parameters\n ----------\n col : Column\n\n Returns\n -------\n tuple\n Tuple of np.ndarray holding the data and the memory owner object\n that keeps the memory alive.\n """\n buffers = col.get_buffers()\n\n data_buff, data_dtype = buffers["data"]\n data = buffer_to_ndarray(\n data_buff, data_dtype, offset=col.offset, length=col.size()\n )\n\n data = set_nulls(data, col, buffers["validity"])\n return data, buffers\n\n\ndef categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:\n """\n Convert a column holding categorical data to a pandas Series.\n\n Parameters\n ----------\n col : Column\n\n Returns\n -------\n tuple\n Tuple of pd.Series holding the data and the memory owner object\n that keeps the memory alive.\n """\n categorical = col.describe_categorical\n\n if not categorical["is_dictionary"]:\n raise NotImplementedError("Non-dictionary categoricals not supported yet")\n\n cat_column = categorical["categories"]\n if hasattr(cat_column, "_col"):\n # Item "Column" of "Optional[Column]" has no attribute "_col"\n # Item "None" of "Optional[Column]" has no attribute "_col"\n categories = np.array(cat_column._col) # type: ignore[union-attr]\n else:\n raise NotImplementedError(\n "Interchanging categorical columns isn't supported yet, and our "\n "fallback of using the `col._col` attribute (a ndarray) failed."\n )\n buffers = col.get_buffers()\n\n codes_buff, codes_dtype = buffers["data"]\n codes = buffer_to_ndarray(\n codes_buff, codes_dtype, offset=col.offset, length=col.size()\n )\n\n # Doing module in order to not get ``IndexError`` for\n # out-of-bounds sentinel values in `codes`\n if len(categories) > 0:\n values = categories[codes % len(categories)]\n else:\n 
values = codes\n\n cat = pd.Categorical(\n values, categories=categories, ordered=categorical["is_ordered"]\n )\n data = pd.Series(cat)\n\n data = set_nulls(data, col, buffers["validity"])\n return data, buffers\n\n\ndef string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:\n """\n Convert a column holding string data to a NumPy array.\n\n Parameters\n ----------\n col : Column\n\n Returns\n -------\n tuple\n Tuple of np.ndarray holding the data and the memory owner object\n that keeps the memory alive.\n """\n null_kind, sentinel_val = col.describe_null\n\n if null_kind not in (\n ColumnNullType.NON_NULLABLE,\n ColumnNullType.USE_BITMASK,\n ColumnNullType.USE_BYTEMASK,\n ):\n raise NotImplementedError(\n f"{null_kind} null kind is not yet supported for string columns."\n )\n\n buffers = col.get_buffers()\n\n assert buffers["offsets"], "String buffers must contain offsets"\n # Retrieve the data buffer containing the UTF-8 code units\n data_buff, _ = buffers["data"]\n # We're going to reinterpret the buffer as uint8, so make sure we can do it safely\n assert col.dtype[2] in (\n ArrowCTypes.STRING,\n ArrowCTypes.LARGE_STRING,\n ) # format_str == utf-8\n # Convert the buffers to NumPy arrays. 
In order to go from STRING to\n # an equivalent ndarray, we claim that the buffer is uint8 (i.e., a byte array)\n data_dtype = (\n DtypeKind.UINT,\n 8,\n ArrowCTypes.UINT8,\n Endianness.NATIVE,\n )\n # Specify zero offset as we don't want to chunk the string data\n data = buffer_to_ndarray(data_buff, data_dtype, offset=0, length=data_buff.bufsize)\n\n # Retrieve the offsets buffer containing the index offsets demarcating\n # the beginning and the ending of each string\n offset_buff, offset_dtype = buffers["offsets"]\n # Offsets buffer contains start-stop positions of strings in the data buffer,\n # meaning that it has more elements than in the data buffer, do `col.size() + 1`\n # here to pass a proper offsets buffer size\n offsets = buffer_to_ndarray(\n offset_buff, offset_dtype, offset=col.offset, length=col.size() + 1\n )\n\n null_pos = None\n if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK):\n validity = buffers["validity"]\n if validity is not None:\n valid_buff, valid_dtype = validity\n null_pos = buffer_to_ndarray(\n valid_buff, valid_dtype, offset=col.offset, length=col.size()\n )\n if sentinel_val == 0:\n null_pos = ~null_pos\n\n # Assemble the strings from the code units\n str_list: list[None | float | str] = [None] * col.size()\n for i in range(col.size()):\n # Check for missing values\n if null_pos is not None and null_pos[i]:\n str_list[i] = np.nan\n continue\n\n # Extract a range of code units\n units = data[offsets[i] : offsets[i + 1]]\n\n # Convert the list of code units to bytes\n str_bytes = bytes(units)\n\n # Create the string\n string = str_bytes.decode(encoding="utf-8")\n\n # Add to our list of strings\n str_list[i] = string\n\n if using_string_dtype():\n res = pd.Series(str_list, dtype="str")\n else:\n res = np.asarray(str_list, dtype="object") # type: ignore[assignment]\n\n return res, buffers # type: ignore[return-value]\n\n\ndef parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray:\n """Parse 
datetime `format_str` to interpret the `data`."""\n # timestamp 'ts{unit}:tz'\n timestamp_meta = re.match(r"ts([smun]):(.*)", format_str)\n if timestamp_meta:\n unit, tz = timestamp_meta.group(1), timestamp_meta.group(2)\n if unit != "s":\n # the format string describes only a first letter of the unit, so\n # add one extra letter to convert the unit to numpy-style:\n # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns'\n unit += "s"\n data = data.astype(f"datetime64[{unit}]")\n if tz != "":\n data = pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(tz)\n return data\n\n # date 'td{Days/Ms}'\n date_meta = re.match(r"td([Dm])", format_str)\n if date_meta:\n unit = date_meta.group(1)\n if unit == "D":\n # NumPy doesn't support DAY unit, so converting days to seconds\n # (converting to uint64 to avoid overflow)\n data = (data.astype(np.uint64) * (24 * 60 * 60)).astype("datetime64[s]")\n elif unit == "m":\n data = data.astype("datetime64[ms]")\n else:\n raise NotImplementedError(f"Date unit is not supported: {unit}")\n return data\n\n raise NotImplementedError(f"DateTime kind is not supported: {format_str}")\n\n\ndef datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any]:\n """\n Convert a column holding DateTime data to a NumPy array.\n\n Parameters\n ----------\n col : Column\n\n Returns\n -------\n tuple\n Tuple of np.ndarray holding the data and the memory owner object\n that keeps the memory alive.\n """\n buffers = col.get_buffers()\n\n _, col_bit_width, format_str, _ = col.dtype\n dbuf, _ = buffers["data"]\n # Consider dtype being `uint` to get number of units passed since the 01.01.1970\n\n data = buffer_to_ndarray(\n dbuf,\n (\n DtypeKind.INT,\n col_bit_width,\n getattr(ArrowCTypes, f"INT{col_bit_width}"),\n Endianness.NATIVE,\n ),\n offset=col.offset,\n length=col.size(),\n )\n\n data = parse_datetime_format_str(format_str, data) # type: ignore[assignment]\n data = set_nulls(data, col, buffers["validity"])\n return data, buffers\n\n\ndef 
buffer_to_ndarray(\n buffer: Buffer,\n dtype: tuple[DtypeKind, int, str, str],\n *,\n length: int,\n offset: int = 0,\n) -> np.ndarray:\n """\n Build a NumPy array from the passed buffer.\n\n Parameters\n ----------\n buffer : Buffer\n Buffer to build a NumPy array from.\n dtype : tuple\n Data type of the buffer conforming protocol dtypes format.\n offset : int, default: 0\n Number of elements to offset from the start of the buffer.\n length : int, optional\n If the buffer is a bit-mask, specifies a number of bits to read\n from the buffer. Has no effect otherwise.\n\n Returns\n -------\n np.ndarray\n\n Notes\n -----\n The returned array doesn't own the memory. The caller of this function is\n responsible for keeping the memory owner object alive as long as\n the returned NumPy array is being used.\n """\n kind, bit_width, _, _ = dtype\n\n column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None)\n if column_dtype is None:\n raise NotImplementedError(f"Conversion for {dtype} is not yet supported.")\n\n # TODO: No DLPack yet, so need to construct a new ndarray from the data pointer\n # and size in the buffer plus the dtype on the column. 
Use DLPack as NumPy supports\n # it since https://github.com/numpy/numpy/pull/19083\n ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype)\n\n if bit_width == 1:\n assert length is not None, "`length` must be specified for a bit-mask buffer."\n pa = import_optional_dependency("pyarrow")\n arr = pa.BooleanArray.from_buffers(\n pa.bool_(),\n length,\n [None, pa.foreign_buffer(buffer.ptr, length)],\n offset=offset,\n )\n return np.asarray(arr)\n else:\n data_pointer = ctypes.cast(\n buffer.ptr + (offset * bit_width // 8), ctypes.POINTER(ctypes_type)\n )\n if length > 0:\n return np.ctypeslib.as_array(data_pointer, shape=(length,))\n return np.array([], dtype=ctypes_type)\n\n\ndef set_nulls(\n data: np.ndarray | pd.Series,\n col: Column,\n validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,\n allow_modify_inplace: bool = True,\n):\n """\n Set null values for the data according to the column null kind.\n\n Parameters\n ----------\n data : np.ndarray or pd.Series\n Data to set nulls in.\n col : Column\n Column object that describes the `data`.\n validity : tuple(Buffer, dtype) or None\n The return value of ``col.buffers()``. 
We do not access the ``col.buffers()``\n here to not take the ownership of the memory of buffer objects.\n allow_modify_inplace : bool, default: True\n Whether to modify the `data` inplace when zero-copy is possible (True) or always\n modify a copy of the `data` (False).\n\n Returns\n -------\n np.ndarray or pd.Series\n Data with the nulls being set.\n """\n if validity is None:\n return data\n null_kind, sentinel_val = col.describe_null\n null_pos = None\n\n if null_kind == ColumnNullType.USE_SENTINEL:\n null_pos = pd.Series(data) == sentinel_val\n elif null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK):\n assert validity, "Expected to have a validity buffer for the mask"\n valid_buff, valid_dtype = validity\n null_pos = buffer_to_ndarray(\n valid_buff, valid_dtype, offset=col.offset, length=col.size()\n )\n if sentinel_val == 0:\n null_pos = ~null_pos\n elif null_kind in (ColumnNullType.NON_NULLABLE, ColumnNullType.USE_NAN):\n pass\n else:\n raise NotImplementedError(f"Null kind {null_kind} is not yet supported.")\n\n if null_pos is not None and np.any(null_pos):\n if not allow_modify_inplace:\n data = data.copy()\n try:\n data[null_pos] = None\n except TypeError:\n # TypeError happens if the `data` dtype appears to be non-nullable\n # in numpy notation (bool, int, uint). If this happens,\n # cast the `data` to nullable float dtype.\n data = data.astype(float)\n data[null_pos] = None\n except SettingWithCopyError:\n # `SettingWithCopyError` may happen for datetime-like with missing values.\n data = data.copy()\n data[null_pos] = None\n\n return data\n | .venv\Lib\site-packages\pandas\core\interchange\from_dataframe.py | from_dataframe.py | Python | 18,077 | 0.95 | 0.114901 | 0.07957 | vue-tools | 131 | 2024-08-11T00:54:18.440570 | MIT | false | 2d499cce2cb9ed4e31d4be15df6f9d35 |
"""\nUtility functions and objects for implementing the interchange API.\n"""\n\nfrom __future__ import annotations\n\nimport typing\n\nimport numpy as np\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.dtypes import (\n ArrowDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n)\n\nimport pandas as pd\n\nif typing.TYPE_CHECKING:\n from pandas._typing import DtypeObj\n\n\n# Maps str(pyarrow.DataType) = C type format string\n# Currently, no pyarrow API for this\nPYARROW_CTYPES = {\n "null": "n",\n "bool": "b",\n "uint8": "C",\n "uint16": "S",\n "uint32": "I",\n "uint64": "L",\n "int8": "c",\n "int16": "S",\n "int32": "i",\n "int64": "l",\n "halffloat": "e", # float16\n "float": "f", # float32\n "double": "g", # float64\n "string": "u",\n "large_string": "U",\n "binary": "z",\n "time32[s]": "tts",\n "time32[ms]": "ttm",\n "time64[us]": "ttu",\n "time64[ns]": "ttn",\n "date32[day]": "tdD",\n "date64[ms]": "tdm",\n "timestamp[s]": "tss:",\n "timestamp[ms]": "tsm:",\n "timestamp[us]": "tsu:",\n "timestamp[ns]": "tsn:",\n "duration[s]": "tDs",\n "duration[ms]": "tDm",\n "duration[us]": "tDu",\n "duration[ns]": "tDn",\n}\n\n\nclass ArrowCTypes:\n """\n Enum for Apache Arrow C type format strings.\n\n The Arrow C data interface:\n https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings\n """\n\n NULL = "n"\n BOOL = "b"\n INT8 = "c"\n UINT8 = "C"\n INT16 = "s"\n UINT16 = "S"\n INT32 = "i"\n UINT32 = "I"\n INT64 = "l"\n UINT64 = "L"\n FLOAT16 = "e"\n FLOAT32 = "f"\n FLOAT64 = "g"\n STRING = "u" # utf-8\n LARGE_STRING = "U" # utf-8\n DATE32 = "tdD"\n DATE64 = "tdm"\n # Resoulution:\n # - seconds -> 's'\n # - milliseconds -> 'm'\n # - microseconds -> 'u'\n # - nanoseconds -> 'n'\n TIMESTAMP = "ts{resolution}:{tz}"\n TIME = "tt{resolution}"\n\n\nclass Endianness:\n """Enum indicating the byte-order of a data-type."""\n\n LITTLE = "<"\n BIG = ">"\n NATIVE = "="\n NA = "|"\n\n\ndef dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:\n """\n 
Represent pandas `dtype` as a format string in Apache Arrow C notation.\n\n Parameters\n ----------\n dtype : np.dtype\n Datatype of pandas DataFrame to represent.\n\n Returns\n -------\n str\n Format string in Apache Arrow C notation of the given `dtype`.\n """\n if isinstance(dtype, CategoricalDtype):\n return ArrowCTypes.INT64\n elif dtype == np.dtype("O"):\n return ArrowCTypes.STRING\n elif isinstance(dtype, ArrowDtype):\n import pyarrow as pa\n\n pa_type = dtype.pyarrow_dtype\n if pa.types.is_decimal(pa_type):\n return f"d:{pa_type.precision},{pa_type.scale}"\n elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None:\n return f"ts{pa_type.unit[0]}:{pa_type.tz}"\n format_str = PYARROW_CTYPES.get(str(pa_type), None)\n if format_str is not None:\n return format_str\n\n format_str = getattr(ArrowCTypes, dtype.name.upper(), None)\n if format_str is not None:\n return format_str\n\n if isinstance(dtype, pd.StringDtype):\n # TODO(infer_string) this should be LARGE_STRING for pyarrow storage,\n # but current tests don't cover this distinction\n return ArrowCTypes.STRING\n\n elif lib.is_np_dtype(dtype, "M"):\n # Selecting the first char of resolution string:\n # dtype.str -> '<M8[ns]' -> 'n'\n resolution = np.datetime_data(dtype)[0][0]\n return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")\n\n elif isinstance(dtype, DatetimeTZDtype):\n return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)\n\n elif isinstance(dtype, pd.BooleanDtype):\n return ArrowCTypes.BOOL\n\n raise NotImplementedError(\n f"Conversion of {dtype} to Arrow C format string is not implemented."\n )\n\n\ndef maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None:\n """\n Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary.\n\n - Returns `None` if the input series is not backed by a multi-chunk pyarrow array\n (and so doesn't need rechunking)\n - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk\n 
pyarrow array and `allow_copy` is `True`.\n - Raises a `RuntimeError` if `allow_copy` is `False` and input is a\n based by a multi-chunk pyarrow array.\n """\n if not isinstance(series.dtype, pd.ArrowDtype):\n return None\n chunked_array = series.array._pa_array # type: ignore[attr-defined]\n if len(chunked_array.chunks) == 1:\n return None\n if not allow_copy:\n raise RuntimeError(\n "Found multi-chunk pyarrow array, but `allow_copy` is False. "\n "Please rechunk the array before calling this function, or set "\n "`allow_copy=True`."\n )\n arr = chunked_array.combine_chunks()\n return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index)\n | .venv\Lib\site-packages\pandas\core\interchange\utils.py | utils.py | Python | 5,051 | 0.95 | 0.120219 | 0.071895 | react-lib | 527 | 2024-06-27T05:58:14.239669 | GPL-3.0 | false | f494f2c67402a33e43bc9ff6d0f25567 |
\n\n | .venv\Lib\site-packages\pandas\core\interchange\__pycache__\buffer.cpython-313.pyc | buffer.cpython-313.pyc | Other | 4,659 | 0.8 | 0.032258 | 0 | python-kit | 75 | 2025-01-28T03:59:18.007055 | BSD-3-Clause | false | 357694e9bbb9159788a7c212f65629d5 |
\n\n | .venv\Lib\site-packages\pandas\core\interchange\__pycache__\column.cpython-313.pyc | column.cpython-313.pyc | Other | 19,026 | 0.95 | 0.07027 | 0.011173 | awesome-app | 428 | 2025-03-21T12:07:25.699613 | GPL-3.0 | false | f9a306b760382e077f5978126a5b5316 |
\n\n | .venv\Lib\site-packages\pandas\core\interchange\__pycache__\dataframe.cpython-313.pyc | dataframe.cpython-313.pyc | Other | 6,339 | 0.95 | 0.059701 | 0 | vue-tools | 919 | 2024-10-21T08:14:15.162523 | MIT | false | a1c9c95d66b0e6784824081578137462 |
\n\n | .venv\Lib\site-packages\pandas\core\interchange\__pycache__\dataframe_protocol.cpython-313.pyc | dataframe_protocol.cpython-313.pyc | Other | 17,713 | 0.95 | 0.154321 | 0 | awesome-app | 800 | 2024-08-14T20:37:42.199780 | Apache-2.0 | false | 30db5d1d00e13ada2e0113929cd31837 |
\n\n | .venv\Lib\site-packages\pandas\core\interchange\__pycache__\from_dataframe.cpython-313.pyc | from_dataframe.cpython-313.pyc | Other | 19,082 | 0.95 | 0.044218 | 0 | react-lib | 278 | 2023-09-25T07:11:55.366220 | MIT | false | 47777a0a26494f50f801ecaac2d530b8 |
\n\n | .venv\Lib\site-packages\pandas\core\interchange\__pycache__\utils.cpython-313.pyc | utils.cpython-313.pyc | Other | 6,563 | 0.95 | 0.060345 | 0 | node-utils | 825 | 2025-05-08T15:12:42.522621 | GPL-3.0 | false | d9b7492d4920c490ed78904c44702640 |
\n\n | .venv\Lib\site-packages\pandas\core\interchange\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 198 | 0.7 | 0 | 0 | python-kit | 754 | 2024-09-01T20:56:18.079359 | GPL-3.0 | false | 9a43e139d4d6061ba2555540cd89a005 |
"""\nThis is a pseudo-public API for downstream libraries. We ask that downstream\nauthors\n\n1) Try to avoid using internals directly altogether, and failing that,\n2) Use only functions exposed here (or in core.internals)\n\n"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas._libs.internals import BlockPlacement\n\nfrom pandas.core.dtypes.common import pandas_dtype\nfrom pandas.core.dtypes.dtypes import (\n DatetimeTZDtype,\n PeriodDtype,\n)\n\nfrom pandas.core.arrays import DatetimeArray\nfrom pandas.core.construction import extract_array\nfrom pandas.core.internals.blocks import (\n check_ndim,\n ensure_block_shape,\n extract_pandas_array,\n get_block_type,\n maybe_coerce_values,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import Dtype\n\n from pandas.core.internals.blocks import Block\n\n\ndef make_block(\n values, placement, klass=None, ndim=None, dtype: Dtype | None = None\n) -> Block:\n """\n This is a pseudo-public analogue to blocks.new_block.\n\n We ask that downstream libraries use this rather than any fully-internal\n APIs, including but not limited to:\n\n - core.internals.blocks.make_block\n - Block.make_block\n - Block.make_block_same_class\n - Block.__init__\n """\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n values, dtype = extract_pandas_array(values, dtype, ndim)\n\n from pandas.core.internals.blocks import (\n DatetimeTZBlock,\n ExtensionBlock,\n )\n\n if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype):\n # GH-44681 changed PeriodArray to be stored in the 2D\n # NDArrayBackedExtensionBlock instead of ExtensionBlock\n # -> still allow ExtensionBlock to be passed in this case for back compat\n klass = None\n\n if klass is None:\n dtype = dtype or values.dtype\n klass = get_block_type(dtype)\n\n elif klass is DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype):\n # pyarrow calls get here\n values = DatetimeArray._simple_new(\n # error: 
Argument "dtype" to "_simple_new" of "DatetimeArray" has\n # incompatible type "Union[ExtensionDtype, dtype[Any], None]";\n # expected "Union[dtype[datetime64], DatetimeTZDtype]"\n values,\n dtype=dtype, # type: ignore[arg-type]\n )\n\n if not isinstance(placement, BlockPlacement):\n placement = BlockPlacement(placement)\n\n ndim = maybe_infer_ndim(values, placement, ndim)\n if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)):\n # GH#41168 ensure we can pass 1D dt64tz values\n # More generally, any EA dtype that isn't is_1d_only_ea_dtype\n values = extract_array(values, extract_numpy=True)\n values = ensure_block_shape(values, ndim)\n\n check_ndim(values, placement, ndim)\n values = maybe_coerce_values(values)\n return klass(values, ndim=ndim, placement=placement)\n\n\ndef maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:\n """\n If `ndim` is not provided, infer it from placement and values.\n """\n if ndim is None:\n # GH#38134 Block constructor now assumes ndim is not None\n if not isinstance(values.dtype, np.dtype):\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n else:\n ndim = values.ndim\n return ndim\n\n\ndef __getattr__(name: str):\n # GH#55139\n import warnings\n\n if name in [\n "Block",\n "ExtensionBlock",\n "DatetimeTZBlock",\n "create_block_manager_from_blocks",\n ]:\n # GH#33892\n warnings.warn(\n f"{name} is deprecated and will be removed in a future version. 
"\n "Use public APIs instead.",\n DeprecationWarning,\n # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758\n # on hard-coding stacklevel\n stacklevel=2,\n )\n\n if name == "create_block_manager_from_blocks":\n from pandas.core.internals.managers import create_block_manager_from_blocks\n\n return create_block_manager_from_blocks\n\n elif name == "Block":\n from pandas.core.internals.blocks import Block\n\n return Block\n\n elif name == "DatetimeTZBlock":\n from pandas.core.internals.blocks import DatetimeTZBlock\n\n return DatetimeTZBlock\n\n elif name == "ExtensionBlock":\n from pandas.core.internals.blocks import ExtensionBlock\n\n return ExtensionBlock\n\n raise AttributeError(\n f"module 'pandas.core.internals.api' has no attribute '{name}'"\n )\n | .venv\Lib\site-packages\pandas\core\internals\api.py | api.py | Python | 4,695 | 0.95 | 0.102564 | 0.115702 | python-kit | 987 | 2024-12-23T23:09:37.853898 | MIT | false | ea54c8e1a385b4fb9ed0a89f5fbfec33 |
"""\nExperimental manager based on storing a collection of 1D arrays\n"""\nfrom __future__ import annotations\n\nimport itertools\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Literal,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n lib,\n)\n\nfrom pandas.core.dtypes.astype import (\n astype_array,\n astype_array_safe,\n)\nfrom pandas.core.dtypes.cast import (\n ensure_dtype_can_hold_na,\n find_common_type,\n infer_dtype_from_scalar,\n np_find_common_type,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_datetime64_ns_dtype,\n is_integer,\n is_numeric_dtype,\n is_object_dtype,\n is_timedelta64_ns_dtype,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n array_equals,\n isna,\n na_value_for_dtype,\n)\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.array_algos.quantile import quantile_compat\nfrom pandas.core.array_algos.take import take_1d\nfrom pandas.core.arrays import (\n DatetimeArray,\n ExtensionArray,\n NumpyExtensionArray,\n TimedeltaArray,\n)\nfrom pandas.core.construction import (\n ensure_wrapped_if_datetimelike,\n extract_array,\n sanitize_array,\n)\nfrom pandas.core.indexers import (\n maybe_convert_indices,\n validate_indices,\n)\nfrom pandas.core.indexes.api import (\n Index,\n ensure_index,\n)\nfrom pandas.core.indexes.base import get_values_for_csv\nfrom pandas.core.internals.base import (\n DataManager,\n SingleDataManager,\n ensure_np_dtype,\n interleaved_dtype,\n)\nfrom pandas.core.internals.blocks import (\n BlockPlacement,\n ensure_block_shape,\n external_values,\n extract_pandas_array,\n maybe_coerce_values,\n new_block,\n)\nfrom pandas.core.internals.managers import make_na_array\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import (\n ArrayLike,\n AxisInt,\n DtypeObj,\n QuantileInterpolation,\n Self,\n npt,\n )\n\n\nclass 
BaseArrayManager(DataManager):\n """\n Core internal data structure to implement DataFrame and Series.\n\n Alternative to the BlockManager, storing a list of 1D arrays instead of\n Blocks.\n\n This is *not* a public API class\n\n Parameters\n ----------\n arrays : Sequence of arrays\n axes : Sequence of Index\n verify_integrity : bool, default True\n\n """\n\n __slots__ = [\n "_axes", # private attribute, because 'axes' has different order, see below\n "arrays",\n ]\n\n arrays: list[np.ndarray | ExtensionArray]\n _axes: list[Index]\n\n def __init__(\n self,\n arrays: list[np.ndarray | ExtensionArray],\n axes: list[Index],\n verify_integrity: bool = True,\n ) -> None:\n raise NotImplementedError\n\n def make_empty(self, axes=None) -> Self:\n """Return an empty ArrayManager with the items axis of len 0 (no columns)"""\n if axes is None:\n axes = [self.axes[1:], Index([])]\n\n arrays: list[np.ndarray | ExtensionArray] = []\n return type(self)(arrays, axes)\n\n @property\n def items(self) -> Index:\n return self._axes[-1]\n\n @property\n # error: Signature of "axes" incompatible with supertype "DataManager"\n def axes(self) -> list[Index]: # type: ignore[override]\n # mypy doesn't work to override attribute with property\n # see https://github.com/python/mypy/issues/4125\n """Axes is BlockManager-compatible order (columns, rows)"""\n return [self._axes[1], self._axes[0]]\n\n @property\n def shape_proper(self) -> tuple[int, ...]:\n # this returns (n_rows, n_columns)\n return tuple(len(ax) for ax in self._axes)\n\n @staticmethod\n def _normalize_axis(axis: AxisInt) -> int:\n # switch axis\n axis = 1 if axis == 0 else 0\n return axis\n\n def set_axis(self, axis: AxisInt, new_labels: Index) -> None:\n # Caller is responsible for ensuring we have an Index object.\n self._validate_set_axis(axis, new_labels)\n axis = self._normalize_axis(axis)\n self._axes[axis] = new_labels\n\n def get_dtypes(self) -> npt.NDArray[np.object_]:\n return np.array([arr.dtype for arr in 
self.arrays], dtype="object")\n\n def add_references(self, mgr: BaseArrayManager) -> None:\n """\n Only implemented on the BlockManager level\n """\n return\n\n def __getstate__(self):\n return self.arrays, self._axes\n\n def __setstate__(self, state) -> None:\n self.arrays = state[0]\n self._axes = state[1]\n\n def __repr__(self) -> str:\n output = type(self).__name__\n output += f"\nIndex: {self._axes[0]}"\n if self.ndim == 2:\n output += f"\nColumns: {self._axes[1]}"\n output += f"\n{len(self.arrays)} arrays:"\n for arr in self.arrays:\n output += f"\n{arr.dtype}"\n return output\n\n def apply(\n self,\n f,\n align_keys: list[str] | None = None,\n **kwargs,\n ) -> Self:\n """\n Iterate over the arrays, collect and create a new ArrayManager.\n\n Parameters\n ----------\n f : str or callable\n Name of the Array method to apply.\n align_keys: List[str] or None, default None\n **kwargs\n Keywords to pass to `f`\n\n Returns\n -------\n ArrayManager\n """\n assert "filter" not in kwargs\n\n align_keys = align_keys or []\n result_arrays: list[ArrayLike] = []\n # fillna: Series/DataFrame is responsible for making sure value is aligned\n\n aligned_args = {k: kwargs[k] for k in align_keys}\n\n if f == "apply":\n f = kwargs.pop("func")\n\n for i, arr in enumerate(self.arrays):\n if aligned_args:\n for k, obj in aligned_args.items():\n if isinstance(obj, (ABCSeries, ABCDataFrame)):\n # The caller is responsible for ensuring that\n # obj.axes[-1].equals(self.items)\n if obj.ndim == 1:\n kwargs[k] = obj.iloc[i]\n else:\n kwargs[k] = obj.iloc[:, i]._values\n else:\n # otherwise we have an array-like\n kwargs[k] = obj[i]\n\n if callable(f):\n applied = f(arr, **kwargs)\n else:\n applied = getattr(arr, f)(**kwargs)\n\n result_arrays.append(applied)\n\n new_axes = self._axes\n return type(self)(result_arrays, new_axes)\n\n def apply_with_block(self, f, align_keys=None, **kwargs) -> Self:\n # switch axis to follow BlockManager logic\n swap_axis = True\n if f == "interpolate":\n 
swap_axis = False\n if swap_axis and "axis" in kwargs and self.ndim == 2:\n kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0\n\n align_keys = align_keys or []\n aligned_args = {k: kwargs[k] for k in align_keys}\n\n result_arrays = []\n\n for i, arr in enumerate(self.arrays):\n if aligned_args:\n for k, obj in aligned_args.items():\n if isinstance(obj, (ABCSeries, ABCDataFrame)):\n # The caller is responsible for ensuring that\n # obj.axes[-1].equals(self.items)\n if obj.ndim == 1:\n if self.ndim == 2:\n kwargs[k] = obj.iloc[slice(i, i + 1)]._values\n else:\n kwargs[k] = obj.iloc[:]._values\n else:\n kwargs[k] = obj.iloc[:, [i]]._values\n else:\n # otherwise we have an ndarray\n if obj.ndim == 2:\n kwargs[k] = obj[[i]]\n\n if isinstance(arr.dtype, np.dtype) and not isinstance(arr, np.ndarray):\n # i.e. TimedeltaArray, DatetimeArray with tz=None. Need to\n # convert for the Block constructors.\n arr = np.asarray(arr)\n\n arr = maybe_coerce_values(arr)\n if self.ndim == 2:\n arr = ensure_block_shape(arr, 2)\n bp = BlockPlacement(slice(0, 1, 1))\n block = new_block(arr, placement=bp, ndim=2)\n else:\n bp = BlockPlacement(slice(0, len(self), 1))\n block = new_block(arr, placement=bp, ndim=1)\n\n applied = getattr(block, f)(**kwargs)\n if isinstance(applied, list):\n applied = applied[0]\n arr = applied.values\n if self.ndim == 2 and arr.ndim == 2:\n # 2D for np.ndarray or DatetimeArray/TimedeltaArray\n assert len(arr) == 1\n # error: No overload variant of "__getitem__" of "ExtensionArray"\n # matches argument type "Tuple[int, slice]"\n arr = arr[0, :] # type: ignore[call-overload]\n result_arrays.append(arr)\n\n return type(self)(result_arrays, self._axes)\n\n def setitem(self, indexer, value, warn: bool = True) -> Self:\n return self.apply_with_block("setitem", indexer=indexer, value=value)\n\n def diff(self, n: int) -> Self:\n assert self.ndim == 2 # caller ensures\n return self.apply(algos.diff, n=n)\n\n def astype(self, dtype, copy: bool | None = False, errors: str 
= "raise") -> Self:\n if copy is None:\n copy = True\n\n return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors)\n\n def convert(self, copy: bool | None) -> Self:\n if copy is None:\n copy = True\n\n def _convert(arr):\n if is_object_dtype(arr.dtype):\n # extract NumpyExtensionArray for tests that patch\n # NumpyExtensionArray._typ\n arr = np.asarray(arr)\n result = lib.maybe_convert_objects(\n arr,\n convert_non_numeric=True,\n )\n if result is arr and copy:\n return arr.copy()\n return result\n else:\n return arr.copy() if copy else arr\n\n return self.apply(_convert)\n\n def get_values_for_csv(\n self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None\n ) -> Self:\n return self.apply(\n get_values_for_csv,\n na_rep=na_rep,\n quoting=quoting,\n float_format=float_format,\n date_format=date_format,\n decimal=decimal,\n )\n\n @property\n def any_extension_types(self) -> bool:\n """Whether any of the blocks in this manager are extension blocks"""\n return False # any(block.is_extension for block in self.blocks)\n\n @property\n def is_view(self) -> bool:\n """return a boolean if we are a single block and are a view"""\n # TODO what is this used for?\n return False\n\n @property\n def is_single_block(self) -> bool:\n return len(self.arrays) == 1\n\n def _get_data_subset(self, predicate: Callable) -> Self:\n indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)]\n arrays = [self.arrays[i] for i in indices]\n # TODO copy?\n # Note: using Index.take ensures we can retain e.g. 
DatetimeIndex.freq,\n # see test_describe_datetime_columns\n taker = np.array(indices, dtype="intp")\n new_cols = self._axes[1].take(taker)\n new_axes = [self._axes[0], new_cols]\n return type(self)(arrays, new_axes, verify_integrity=False)\n\n def get_bool_data(self, copy: bool = False) -> Self:\n """\n Select columns that are bool-dtype and object-dtype columns that are all-bool.\n\n Parameters\n ----------\n copy : bool, default False\n Whether to copy the blocks\n """\n return self._get_data_subset(lambda x: x.dtype == np.dtype(bool))\n\n def get_numeric_data(self, copy: bool = False) -> Self:\n """\n Select columns that have a numeric dtype.\n\n Parameters\n ----------\n copy : bool, default False\n Whether to copy the blocks\n """\n return self._get_data_subset(\n lambda arr: is_numeric_dtype(arr.dtype)\n or getattr(arr.dtype, "_is_numeric", False)\n )\n\n def copy(self, deep: bool | Literal["all"] | None = True) -> Self:\n """\n Make deep or shallow copy of ArrayManager\n\n Parameters\n ----------\n deep : bool or string, default True\n If False, return shallow copy (do not copy data)\n If 'all', copy data and a deep copy of the index\n\n Returns\n -------\n BlockManager\n """\n if deep is None:\n # ArrayManager does not yet support CoW, so deep=None always means\n # deep=True for now\n deep = True\n\n # this preserves the notion of view copying of axes\n if deep:\n # hit in e.g. 
tests.io.json.test_pandas\n\n def copy_func(ax):\n return ax.copy(deep=True) if deep == "all" else ax.view()\n\n new_axes = [copy_func(ax) for ax in self._axes]\n else:\n new_axes = list(self._axes)\n\n if deep:\n new_arrays = [arr.copy() for arr in self.arrays]\n else:\n new_arrays = list(self.arrays)\n return type(self)(new_arrays, new_axes, verify_integrity=False)\n\n def reindex_indexer(\n self,\n new_axis,\n indexer,\n axis: AxisInt,\n fill_value=None,\n allow_dups: bool = False,\n copy: bool | None = True,\n # ignored keywords\n only_slice: bool = False,\n # ArrayManager specific keywords\n use_na_proxy: bool = False,\n ) -> Self:\n axis = self._normalize_axis(axis)\n return self._reindex_indexer(\n new_axis,\n indexer,\n axis,\n fill_value,\n allow_dups,\n copy,\n use_na_proxy,\n )\n\n def _reindex_indexer(\n self,\n new_axis,\n indexer: npt.NDArray[np.intp] | None,\n axis: AxisInt,\n fill_value=None,\n allow_dups: bool = False,\n copy: bool | None = True,\n use_na_proxy: bool = False,\n ) -> Self:\n """\n Parameters\n ----------\n new_axis : Index\n indexer : ndarray[intp] or None\n axis : int\n fill_value : object, default None\n allow_dups : bool, default False\n copy : bool, default True\n\n\n pandas-indexer with -1's only.\n """\n if copy is None:\n # ArrayManager does not yet support CoW, so deep=None always means\n # deep=True for now\n copy = True\n\n if indexer is None:\n if new_axis is self._axes[axis] and not copy:\n return self\n\n result = self.copy(deep=copy)\n result._axes = list(self._axes)\n result._axes[axis] = new_axis\n return result\n\n # some axes don't allow reindexing with dups\n if not allow_dups:\n self._axes[axis]._validate_can_reindex(indexer)\n\n if axis >= self.ndim:\n raise IndexError("Requested axis not found in manager")\n\n if axis == 1:\n new_arrays = []\n for i in indexer:\n if i == -1:\n arr = self._make_na_array(\n fill_value=fill_value, use_na_proxy=use_na_proxy\n )\n else:\n arr = self.arrays[i]\n if copy:\n arr = 
arr.copy()\n new_arrays.append(arr)\n\n else:\n validate_indices(indexer, len(self._axes[0]))\n indexer = ensure_platform_int(indexer)\n mask = indexer == -1\n needs_masking = mask.any()\n new_arrays = [\n take_1d(\n arr,\n indexer,\n allow_fill=needs_masking,\n fill_value=fill_value,\n mask=mask,\n # if fill_value is not None else blk.fill_value\n )\n for arr in self.arrays\n ]\n\n new_axes = list(self._axes)\n new_axes[axis] = new_axis\n\n return type(self)(new_arrays, new_axes, verify_integrity=False)\n\n def take(\n self,\n indexer: npt.NDArray[np.intp],\n axis: AxisInt = 1,\n verify: bool = True,\n ) -> Self:\n """\n Take items along any axis.\n """\n assert isinstance(indexer, np.ndarray), type(indexer)\n assert indexer.dtype == np.intp, indexer.dtype\n\n axis = self._normalize_axis(axis)\n\n if not indexer.ndim == 1:\n raise ValueError("indexer should be 1-dimensional")\n\n n = self.shape_proper[axis]\n indexer = maybe_convert_indices(indexer, n, verify=verify)\n\n new_labels = self._axes[axis].take(indexer)\n return self._reindex_indexer(\n new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True\n )\n\n def _make_na_array(self, fill_value=None, use_na_proxy: bool = False):\n if use_na_proxy:\n assert fill_value is None\n return NullArrayProxy(self.shape_proper[0])\n\n if fill_value is None:\n fill_value = np.nan\n\n dtype, fill_value = infer_dtype_from_scalar(fill_value)\n array_values = make_na_array(dtype, self.shape_proper[:1], fill_value)\n return array_values\n\n def _equal_values(self, other) -> bool:\n """\n Used in .equals defined in base class. 
Only check the column values\n assuming shape and indexes have already been checked.\n """\n for left, right in zip(self.arrays, other.arrays):\n if not array_equals(left, right):\n return False\n return True\n\n # TODO\n # to_dict\n\n\nclass ArrayManager(BaseArrayManager):\n @property\n def ndim(self) -> Literal[2]:\n return 2\n\n def __init__(\n self,\n arrays: list[np.ndarray | ExtensionArray],\n axes: list[Index],\n verify_integrity: bool = True,\n ) -> None:\n # Note: we are storing the axes in "_axes" in the (row, columns) order\n # which contrasts the order how it is stored in BlockManager\n self._axes = axes\n self.arrays = arrays\n\n if verify_integrity:\n self._axes = [ensure_index(ax) for ax in axes]\n arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays]\n self.arrays = [maybe_coerce_values(arr) for arr in arrays]\n self._verify_integrity()\n\n def _verify_integrity(self) -> None:\n n_rows, n_columns = self.shape_proper\n if not len(self.arrays) == n_columns:\n raise ValueError(\n "Number of passed arrays must equal the size of the column Index: "\n f"{len(self.arrays)} arrays vs {n_columns} columns."\n )\n for arr in self.arrays:\n if not len(arr) == n_rows:\n raise ValueError(\n "Passed arrays should have the same length as the rows Index: "\n f"{len(arr)} vs {n_rows} rows"\n )\n if not isinstance(arr, (np.ndarray, ExtensionArray)):\n raise ValueError(\n "Passed arrays should be np.ndarray or ExtensionArray instances, "\n f"got {type(arr)} instead"\n )\n if not arr.ndim == 1:\n raise ValueError(\n "Passed arrays should be 1-dimensional, got array with "\n f"{arr.ndim} dimensions instead."\n )\n\n # --------------------------------------------------------------------\n # Indexing\n\n def fast_xs(self, loc: int) -> SingleArrayManager:\n """\n Return the array corresponding to `frame.iloc[loc]`.\n\n Parameters\n ----------\n loc : int\n\n Returns\n -------\n np.ndarray or ExtensionArray\n """\n dtype = interleaved_dtype([arr.dtype for arr in 
self.arrays])\n\n values = [arr[loc] for arr in self.arrays]\n if isinstance(dtype, ExtensionDtype):\n result = dtype.construct_array_type()._from_sequence(values, dtype=dtype)\n # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT\n elif is_datetime64_ns_dtype(dtype):\n result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray\n elif is_timedelta64_ns_dtype(dtype):\n result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray\n else:\n result = np.array(values, dtype=dtype)\n return SingleArrayManager([result], [self._axes[1]])\n\n def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager:\n axis = self._normalize_axis(axis)\n\n if axis == 0:\n arrays = [arr[slobj] for arr in self.arrays]\n elif axis == 1:\n arrays = self.arrays[slobj]\n\n new_axes = list(self._axes)\n new_axes[axis] = new_axes[axis]._getitem_slice(slobj)\n\n return type(self)(arrays, new_axes, verify_integrity=False)\n\n def iget(self, i: int) -> SingleArrayManager:\n """\n Return the data as a SingleArrayManager.\n """\n values = self.arrays[i]\n return SingleArrayManager([values], [self._axes[0]])\n\n def iget_values(self, i: int) -> ArrayLike:\n """\n Return the data for column i as the values (ndarray or ExtensionArray).\n """\n return self.arrays[i]\n\n @property\n def column_arrays(self) -> list[ArrayLike]:\n """\n Used in the JSON C code to access column arrays.\n """\n\n return [np.asarray(arr) for arr in self.arrays]\n\n def iset(\n self,\n loc: int | slice | np.ndarray,\n value: ArrayLike,\n inplace: bool = False,\n refs=None,\n ) -> None:\n """\n Set new column(s).\n\n This changes the ArrayManager in-place, but replaces (an) existing\n column(s), not changing column values in-place).\n\n Parameters\n ----------\n loc : integer, slice or boolean mask\n Positional location (already bounds checked)\n value : np.ndarray or ExtensionArray\n inplace : bool, default False\n Whether overwrite existing array as opposed to replacing it.\n 
"""\n # single column -> single integer index\n if lib.is_integer(loc):\n # TODO can we avoid needing to unpack this here? That means converting\n # DataFrame into 1D array when loc is an integer\n if isinstance(value, np.ndarray) and value.ndim == 2:\n assert value.shape[1] == 1\n value = value[:, 0]\n\n # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item\n # but we should avoid that and pass directly the proper array\n value = maybe_coerce_values(value)\n\n assert isinstance(value, (np.ndarray, ExtensionArray))\n assert value.ndim == 1\n assert len(value) == len(self._axes[0])\n self.arrays[loc] = value\n return\n\n # multiple columns -> convert slice or array to integer indices\n elif isinstance(loc, slice):\n indices: range | np.ndarray = range(\n loc.start if loc.start is not None else 0,\n loc.stop if loc.stop is not None else self.shape_proper[1],\n loc.step if loc.step is not None else 1,\n )\n else:\n assert isinstance(loc, np.ndarray)\n assert loc.dtype == "bool"\n indices = np.nonzero(loc)[0]\n\n assert value.ndim == 2\n assert value.shape[0] == len(self._axes[0])\n\n for value_idx, mgr_idx in enumerate(indices):\n # error: No overload variant of "__getitem__" of "ExtensionArray" matches\n # argument type "Tuple[slice, int]"\n value_arr = value[:, value_idx] # type: ignore[call-overload]\n self.arrays[mgr_idx] = value_arr\n return\n\n def column_setitem(\n self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False\n ) -> None:\n """\n Set values ("setitem") into a single column (not setting the full column).\n\n This is a method on the ArrayManager level, to avoid creating an\n intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)\n """\n if not is_integer(loc):\n raise TypeError("The column index should be an integer")\n arr = self.arrays[loc]\n mgr = SingleArrayManager([arr], [self._axes[0]])\n if inplace_only:\n mgr.setitem_inplace(idx, value)\n else:\n new_mgr = mgr.setitem((idx,), 
value)\n # update existing ArrayManager in-place\n self.arrays[loc] = new_mgr.arrays[0]\n\n def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None:\n """\n Insert item at selected position.\n\n Parameters\n ----------\n loc : int\n item : hashable\n value : np.ndarray or ExtensionArray\n """\n # insert to the axis; this could possibly raise a TypeError\n new_axis = self.items.insert(loc, item)\n\n value = extract_array(value, extract_numpy=True)\n if value.ndim == 2:\n if value.shape[0] == 1:\n # error: No overload variant of "__getitem__" of "ExtensionArray"\n # matches argument type "Tuple[int, slice]"\n value = value[0, :] # type: ignore[call-overload]\n else:\n raise ValueError(\n f"Expected a 1D array, got an array with shape {value.shape}"\n )\n value = maybe_coerce_values(value)\n\n # TODO self.arrays can be empty\n # assert len(value) == len(self.arrays[0])\n\n # TODO is this copy needed?\n arrays = self.arrays.copy()\n arrays.insert(loc, value)\n\n self.arrays = arrays\n self._axes[1] = new_axis\n\n def idelete(self, indexer) -> ArrayManager:\n """\n Delete selected locations in-place (new block and array, same BlockManager)\n """\n to_keep = np.ones(self.shape[0], dtype=np.bool_)\n to_keep[indexer] = False\n\n self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]]\n self._axes = [self._axes[0], self._axes[1][to_keep]]\n return self\n\n # --------------------------------------------------------------------\n # Array-wise Operation\n\n def grouped_reduce(self, func: Callable) -> Self:\n """\n Apply grouped reduction function columnwise, returning a new ArrayManager.\n\n Parameters\n ----------\n func : grouped reduction function\n\n Returns\n -------\n ArrayManager\n """\n result_arrays: list[np.ndarray] = []\n result_indices: list[int] = []\n\n for i, arr in enumerate(self.arrays):\n # grouped_reduce functions all expect 2D arrays\n arr = ensure_block_shape(arr, ndim=2)\n res = func(arr)\n if res.ndim == 2:\n # reverse of 
ensure_block_shape\n assert res.shape[0] == 1\n res = res[0]\n\n result_arrays.append(res)\n result_indices.append(i)\n\n if len(result_arrays) == 0:\n nrows = 0\n else:\n nrows = result_arrays[0].shape[0]\n index = Index(range(nrows))\n\n columns = self.items\n\n # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";\n # expected "List[Union[ndarray, ExtensionArray]]"\n return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type]\n\n def reduce(self, func: Callable) -> Self:\n """\n Apply reduction function column-wise, returning a single-row ArrayManager.\n\n Parameters\n ----------\n func : reduction function\n\n Returns\n -------\n ArrayManager\n """\n result_arrays: list[np.ndarray] = []\n for i, arr in enumerate(self.arrays):\n res = func(arr, axis=0)\n\n # TODO NaT doesn't preserve dtype, so we need to ensure to create\n # a timedelta result array if original was timedelta\n # what if datetime results in timedelta? (eg std)\n dtype = arr.dtype if res is NaT else None\n result_arrays.append(\n sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type]\n )\n\n index = Index._simple_new(np.array([None], dtype=object)) # placeholder\n columns = self.items\n\n # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";\n # expected "List[Union[ndarray, ExtensionArray]]"\n new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type]\n return new_mgr\n\n def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager:\n """\n Apply array_op blockwise with another (aligned) BlockManager.\n """\n # TODO what if `other` is BlockManager ?\n left_arrays = self.arrays\n right_arrays = other.arrays\n result_arrays = [\n array_op(left, right) for left, right in zip(left_arrays, right_arrays)\n ]\n return type(self)(result_arrays, self._axes)\n\n def quantile(\n self,\n *,\n qs: Index, # with dtype float64\n transposed: bool = False,\n interpolation: QuantileInterpolation = "linear",\n 
) -> ArrayManager:\n arrs = [ensure_block_shape(x, 2) for x in self.arrays]\n new_arrs = [\n quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs\n ]\n for i, arr in enumerate(new_arrs):\n if arr.ndim == 2:\n assert arr.shape[0] == 1, arr.shape\n new_arrs[i] = arr[0]\n\n axes = [qs, self._axes[1]]\n return type(self)(new_arrs, axes)\n\n # ----------------------------------------------------------------\n\n def unstack(self, unstacker, fill_value) -> ArrayManager:\n """\n Return a BlockManager with all blocks unstacked.\n\n Parameters\n ----------\n unstacker : reshape._Unstacker\n fill_value : Any\n fill_value for newly introduced missing values.\n\n Returns\n -------\n unstacked : BlockManager\n """\n indexer, _ = unstacker._indexer_and_to_sort\n if unstacker.mask.all():\n new_indexer = indexer\n allow_fill = False\n new_mask2D = None\n needs_masking = None\n else:\n new_indexer = np.full(unstacker.mask.shape, -1)\n new_indexer[unstacker.mask] = indexer\n allow_fill = True\n # calculating the full mask once and passing it to take_1d is faster\n # than letting take_1d calculate it in each repeated call\n new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)\n needs_masking = new_mask2D.any(axis=0)\n new_indexer2D = new_indexer.reshape(*unstacker.full_shape)\n new_indexer2D = ensure_platform_int(new_indexer2D)\n\n new_arrays = []\n for arr in self.arrays:\n for i in range(unstacker.full_shape[1]):\n if allow_fill:\n # error: Value of type "Optional[Any]" is not indexable [index]\n new_arr = take_1d(\n arr,\n new_indexer2D[:, i],\n allow_fill=needs_masking[i], # type: ignore[index]\n fill_value=fill_value,\n mask=new_mask2D[:, i], # type: ignore[index]\n )\n else:\n new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False)\n new_arrays.append(new_arr)\n\n new_index = unstacker.new_index\n new_columns = unstacker.get_new_columns(self._axes[1])\n new_axes = [new_index, new_columns]\n\n return type(self)(new_arrays, new_axes, 
verify_integrity=False)\n\n def as_array(\n self,\n dtype=None,\n copy: bool = False,\n na_value: object = lib.no_default,\n ) -> np.ndarray:\n """\n Convert the blockmanager data into an numpy array.\n\n Parameters\n ----------\n dtype : object, default None\n Data type of the return array.\n copy : bool, default False\n If True then guarantee that a copy is returned. A value of\n False does not guarantee that the underlying data is not\n copied.\n na_value : object, default lib.no_default\n Value to be used as the missing value sentinel.\n\n Returns\n -------\n arr : ndarray\n """\n if len(self.arrays) == 0:\n empty_arr = np.empty(self.shape, dtype=float)\n return empty_arr.transpose()\n\n # We want to copy when na_value is provided to avoid\n # mutating the original object\n copy = copy or na_value is not lib.no_default\n\n if not dtype:\n dtype = interleaved_dtype([arr.dtype for arr in self.arrays])\n\n dtype = ensure_np_dtype(dtype)\n\n result = np.empty(self.shape_proper, dtype=dtype)\n\n for i, arr in enumerate(self.arrays):\n arr = arr.astype(dtype, copy=copy)\n result[:, i] = arr\n\n if na_value is not lib.no_default:\n result[isna(result)] = na_value\n\n return result\n\n @classmethod\n def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:\n """\n Concatenate uniformly-indexed ArrayManagers horizontally.\n """\n # concatting along the columns -> combine reindexed arrays in a single manager\n arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))\n new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False)\n return new_mgr\n\n @classmethod\n def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:\n """\n Concatenate uniformly-indexed ArrayManagers vertically.\n """\n # concatting along the rows -> concat the reindexed arrays\n # TODO(ArrayManager) doesn't yet preserve the correct dtype\n arrays = [\n concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])\n for j in 
range(len(mgrs[0].arrays))\n ]\n new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False)\n return new_mgr\n\n\nclass SingleArrayManager(BaseArrayManager, SingleDataManager):\n __slots__ = [\n "_axes", # private attribute, because 'axes' has different order, see below\n "arrays",\n ]\n\n arrays: list[np.ndarray | ExtensionArray]\n _axes: list[Index]\n\n @property\n def ndim(self) -> Literal[1]:\n return 1\n\n def __init__(\n self,\n arrays: list[np.ndarray | ExtensionArray],\n axes: list[Index],\n verify_integrity: bool = True,\n ) -> None:\n self._axes = axes\n self.arrays = arrays\n\n if verify_integrity:\n assert len(axes) == 1\n assert len(arrays) == 1\n self._axes = [ensure_index(ax) for ax in self._axes]\n arr = arrays[0]\n arr = maybe_coerce_values(arr)\n arr = extract_pandas_array(arr, None, 1)[0]\n self.arrays = [arr]\n self._verify_integrity()\n\n def _verify_integrity(self) -> None:\n (n_rows,) = self.shape\n assert len(self.arrays) == 1\n arr = self.arrays[0]\n assert len(arr) == n_rows\n if not arr.ndim == 1:\n raise ValueError(\n "Passed array should be 1-dimensional, got array with "\n f"{arr.ndim} dimensions instead."\n )\n\n @staticmethod\n def _normalize_axis(axis):\n return axis\n\n def make_empty(self, axes=None) -> Self:\n """Return an empty ArrayManager with index/array of length 0"""\n if axes is None:\n axes = [Index([], dtype=object)]\n array: np.ndarray = np.array([], dtype=self.dtype)\n return type(self)([array], axes)\n\n @classmethod\n def from_array(cls, array, index) -> SingleArrayManager:\n return cls([array], [index])\n\n # error: Cannot override writeable attribute with read-only property\n @property\n def axes(self) -> list[Index]: # type: ignore[override]\n return self._axes\n\n @property\n def index(self) -> Index:\n return self._axes[0]\n\n @property\n def dtype(self):\n return self.array.dtype\n\n def external_values(self):\n """The array that Series.values returns"""\n return external_values(self.array)\n\n def 
internal_values(self):\n """The array that Series._values returns"""\n return self.array\n\n def array_values(self):\n """The array that Series.array returns"""\n arr = self.array\n if isinstance(arr, np.ndarray):\n arr = NumpyExtensionArray(arr)\n return arr\n\n @property\n def _can_hold_na(self) -> bool:\n if isinstance(self.array, np.ndarray):\n return self.array.dtype.kind not in "iub"\n else:\n # ExtensionArray\n return self.array._can_hold_na\n\n @property\n def is_single_block(self) -> bool:\n return True\n\n def fast_xs(self, loc: int) -> SingleArrayManager:\n raise NotImplementedError("Use series._values[loc] instead")\n\n def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleArrayManager:\n if axis >= self.ndim:\n raise IndexError("Requested axis not found in manager")\n\n new_array = self.array[slobj]\n new_index = self.index._getitem_slice(slobj)\n return type(self)([new_array], [new_index], verify_integrity=False)\n\n def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> SingleArrayManager:\n new_array = self.array[indexer]\n new_index = self.index[indexer]\n return type(self)([new_array], [new_index])\n\n # error: Signature of "apply" incompatible with supertype "BaseArrayManager"\n def apply(self, func, **kwargs) -> Self: # type: ignore[override]\n if callable(func):\n new_array = func(self.array, **kwargs)\n else:\n new_array = getattr(self.array, func)(**kwargs)\n return type(self)([new_array], self._axes)\n\n def setitem(self, indexer, value, warn: bool = True) -> SingleArrayManager:\n """\n Set values with indexer.\n\n For SingleArrayManager, this backs s[indexer] = value\n\n See `setitem_inplace` for a version that works inplace and doesn't\n return a new Manager.\n """\n if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:\n raise ValueError(f"Cannot set values with ndim > {self.ndim}")\n return self.apply_with_block("setitem", indexer=indexer, value=value)\n\n def idelete(self, indexer) -> SingleArrayManager:\n 
"""\n Delete selected locations in-place (new array, same ArrayManager)\n """\n to_keep = np.ones(self.shape[0], dtype=np.bool_)\n to_keep[indexer] = False\n\n self.arrays = [self.arrays[0][to_keep]]\n self._axes = [self._axes[0][to_keep]]\n return self\n\n def _get_data_subset(self, predicate: Callable) -> SingleArrayManager:\n # used in get_numeric_data / get_bool_data\n if predicate(self.array):\n return type(self)(self.arrays, self._axes, verify_integrity=False)\n else:\n return self.make_empty()\n\n def set_values(self, values: ArrayLike) -> None:\n """\n Set (replace) the values of the SingleArrayManager in place.\n\n Use at your own risk! This does not check if the passed values are\n valid for the current SingleArrayManager (length, dtype, etc).\n """\n self.arrays[0] = values\n\n def to_2d_mgr(self, columns: Index) -> ArrayManager:\n """\n Manager analogue of Series.to_frame\n """\n arrays = [self.arrays[0]]\n axes = [self.axes[0], columns]\n\n return ArrayManager(arrays, axes, verify_integrity=False)\n\n\nclass NullArrayProxy:\n """\n Proxy object for an all-NA array.\n\n Only stores the length of the array, and not the dtype. 
The dtype\n will only be known when actually concatenating (after determining the\n common dtype, for which this proxy is ignored).\n Using this object avoids that the internals/concat.py needs to determine\n the proper dtype and array type.\n """\n\n ndim = 1\n\n def __init__(self, n: int) -> None:\n self.n = n\n\n @property\n def shape(self) -> tuple[int]:\n return (self.n,)\n\n def to_array(self, dtype: DtypeObj) -> ArrayLike:\n """\n Helper function to create the actual all-NA array from the NullArrayProxy\n object.\n\n Parameters\n ----------\n arr : NullArrayProxy\n dtype : the dtype for the resulting array\n\n Returns\n -------\n np.ndarray or ExtensionArray\n """\n if isinstance(dtype, ExtensionDtype):\n empty = dtype.construct_array_type()._from_sequence([], dtype=dtype)\n indexer = -np.ones(self.n, dtype=np.intp)\n return empty.take(indexer, allow_fill=True)\n else:\n # when introducing missing values, int becomes float, bool becomes object\n dtype = ensure_dtype_can_hold_na(dtype)\n fill_value = na_value_for_dtype(dtype)\n arr = np.empty(self.n, dtype=dtype)\n arr.fill(fill_value)\n return ensure_wrapped_if_datetimelike(arr)\n\n\ndef concat_arrays(to_concat: list) -> ArrayLike:\n """\n Alternative for concat_compat but specialized for use in the ArrayManager.\n\n Differences: only deals with 1D arrays (no axis keyword), assumes\n ensure_wrapped_if_datetimelike and does not skip empty arrays to determine\n the dtype.\n In addition ensures that all NullArrayProxies get replaced with actual\n arrays.\n\n Parameters\n ----------\n to_concat : list of arrays\n\n Returns\n -------\n np.ndarray or ExtensionArray\n """\n # ignore the all-NA proxies to determine the resulting dtype\n to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]\n\n dtypes = {x.dtype for x in to_concat_no_proxy}\n single_dtype = len(dtypes) == 1\n\n if single_dtype:\n target_dtype = to_concat_no_proxy[0].dtype\n elif all(lib.is_np_dtype(x, "iub") for x in 
dtypes):\n # GH#42092\n target_dtype = np_find_common_type(*dtypes)\n else:\n target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])\n\n to_concat = [\n arr.to_array(target_dtype)\n if isinstance(arr, NullArrayProxy)\n else astype_array(arr, target_dtype, copy=False)\n for arr in to_concat\n ]\n\n if isinstance(to_concat[0], ExtensionArray):\n cls = type(to_concat[0])\n return cls._concat_same_type(to_concat)\n\n result = np.concatenate(to_concat)\n\n # TODO decide on exact behaviour (we shouldn't do this only for empty result)\n # see https://github.com/pandas-dev/pandas/issues/39817\n if len(result) == 0:\n # all empties -> check for bool to not coerce to float\n kinds = {obj.dtype.kind for obj in to_concat_no_proxy}\n if len(kinds) != 1:\n if "b" in kinds:\n result = result.astype(object)\n return result\n | .venv\Lib\site-packages\pandas\core\internals\array_manager.py | array_manager.py | Python | 43,927 | 0.95 | 0.188806 | 0.081033 | python-kit | 51 | 2024-09-04T13:41:18.125513 | BSD-3-Clause | false | a8f5a02d0016d200e294aee61a49f308 |
"""\nBase class for the internal managers. Both BlockManager and ArrayManager\ninherit from this class.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n cast,\n final,\n)\n\nimport numpy as np\n\nfrom pandas._config import (\n using_copy_on_write,\n warn_copy_on_write,\n)\n\nfrom pandas._libs import (\n algos as libalgos,\n lib,\n)\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n np_can_hold_element,\n)\nfrom pandas.core.dtypes.dtypes import (\n ExtensionDtype,\n SparseDtype,\n)\n\nfrom pandas.core.base import PandasObject\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexes.api import (\n Index,\n default_index,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n AxisInt,\n DtypeObj,\n Self,\n Shape,\n )\n\n\nclass _AlreadyWarned:\n def __init__(self):\n # This class is used on the manager level to the block level to\n # ensure that we warn only once. The block method can update the\n # warned_already option without returning a value to keep the\n # interface consistent. 
This is only a temporary solution for\n # CoW warnings.\n self.warned_already = False\n\n\nclass DataManager(PandasObject):\n # TODO share more methods/attributes\n\n axes: list[Index]\n\n @property\n def items(self) -> Index:\n raise AbstractMethodError(self)\n\n @final\n def __len__(self) -> int:\n return len(self.items)\n\n @property\n def ndim(self) -> int:\n return len(self.axes)\n\n @property\n def shape(self) -> Shape:\n return tuple(len(ax) for ax in self.axes)\n\n @final\n def _validate_set_axis(self, axis: AxisInt, new_labels: Index) -> None:\n # Caller is responsible for ensuring we have an Index object.\n old_len = len(self.axes[axis])\n new_len = len(new_labels)\n\n if axis == 1 and len(self.items) == 0:\n # If we are setting the index on a DataFrame with no columns,\n # it is OK to change the length.\n pass\n\n elif new_len != old_len:\n raise ValueError(\n f"Length mismatch: Expected axis has {old_len} elements, new "\n f"values have {new_len} elements"\n )\n\n def reindex_indexer(\n self,\n new_axis,\n indexer,\n axis: AxisInt,\n fill_value=None,\n allow_dups: bool = False,\n copy: bool = True,\n only_slice: bool = False,\n ) -> Self:\n raise AbstractMethodError(self)\n\n @final\n def reindex_axis(\n self,\n new_index: Index,\n axis: AxisInt,\n fill_value=None,\n only_slice: bool = False,\n ) -> Self:\n """\n Conform data manager to new index.\n """\n new_index, indexer = self.axes[axis].reindex(new_index)\n\n return self.reindex_indexer(\n new_index,\n indexer,\n axis=axis,\n fill_value=fill_value,\n copy=False,\n only_slice=only_slice,\n )\n\n def _equal_values(self, other: Self) -> bool:\n """\n To be implemented by the subclasses. 
Only check the column values\n assuming shape and indexes have already been checked.\n """\n raise AbstractMethodError(self)\n\n @final\n def equals(self, other: object) -> bool:\n """\n Implementation for DataFrame.equals\n """\n if not isinstance(other, type(self)):\n return False\n\n self_axes, other_axes = self.axes, other.axes\n if len(self_axes) != len(other_axes):\n return False\n if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):\n return False\n\n return self._equal_values(other)\n\n def apply(\n self,\n f,\n align_keys: list[str] | None = None,\n **kwargs,\n ) -> Self:\n raise AbstractMethodError(self)\n\n def apply_with_block(\n self,\n f,\n align_keys: list[str] | None = None,\n **kwargs,\n ) -> Self:\n raise AbstractMethodError(self)\n\n @final\n def isna(self, func) -> Self:\n return self.apply("apply", func=func)\n\n @final\n def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self:\n if limit is not None:\n # Do this validation even if we go through one of the no-op paths\n limit = libalgos.validate_limit(None, limit=limit)\n\n return self.apply_with_block(\n "fillna",\n value=value,\n limit=limit,\n inplace=inplace,\n downcast=downcast,\n using_cow=using_copy_on_write(),\n already_warned=_AlreadyWarned(),\n )\n\n @final\n def where(self, other, cond, align: bool) -> Self:\n if align:\n align_keys = ["other", "cond"]\n else:\n align_keys = ["cond"]\n other = extract_array(other, extract_numpy=True)\n\n return self.apply_with_block(\n "where",\n align_keys=align_keys,\n other=other,\n cond=cond,\n using_cow=using_copy_on_write(),\n )\n\n @final\n def putmask(self, mask, new, align: bool = True, warn: bool = True) -> Self:\n if align:\n align_keys = ["new", "mask"]\n else:\n align_keys = ["mask"]\n new = extract_array(new, extract_numpy=True)\n\n already_warned = None\n if warn_copy_on_write():\n already_warned = _AlreadyWarned()\n if not warn:\n already_warned.warned_already = True\n\n return 
self.apply_with_block(\n "putmask",\n align_keys=align_keys,\n mask=mask,\n new=new,\n using_cow=using_copy_on_write(),\n already_warned=already_warned,\n )\n\n @final\n def round(self, decimals: int, using_cow: bool = False) -> Self:\n return self.apply_with_block(\n "round",\n decimals=decimals,\n using_cow=using_cow,\n )\n\n @final\n def replace(self, to_replace, value, inplace: bool) -> Self:\n inplace = validate_bool_kwarg(inplace, "inplace")\n # NDFrame.replace ensures the not-is_list_likes here\n assert not lib.is_list_like(to_replace)\n assert not lib.is_list_like(value)\n return self.apply_with_block(\n "replace",\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n using_cow=using_copy_on_write(),\n already_warned=_AlreadyWarned(),\n )\n\n @final\n def replace_regex(self, **kwargs) -> Self:\n return self.apply_with_block(\n "_replace_regex",\n **kwargs,\n using_cow=using_copy_on_write(),\n already_warned=_AlreadyWarned(),\n )\n\n @final\n def replace_list(\n self,\n src_list: list[Any],\n dest_list: list[Any],\n inplace: bool = False,\n regex: bool = False,\n ) -> Self:\n """do a list replace"""\n inplace = validate_bool_kwarg(inplace, "inplace")\n\n bm = self.apply_with_block(\n "replace_list",\n src_list=src_list,\n dest_list=dest_list,\n inplace=inplace,\n regex=regex,\n using_cow=using_copy_on_write(),\n already_warned=_AlreadyWarned(),\n )\n bm._consolidate_inplace()\n return bm\n\n def interpolate(self, inplace: bool, **kwargs) -> Self:\n return self.apply_with_block(\n "interpolate",\n inplace=inplace,\n **kwargs,\n using_cow=using_copy_on_write(),\n already_warned=_AlreadyWarned(),\n )\n\n def pad_or_backfill(self, inplace: bool, **kwargs) -> Self:\n return self.apply_with_block(\n "pad_or_backfill",\n inplace=inplace,\n **kwargs,\n using_cow=using_copy_on_write(),\n already_warned=_AlreadyWarned(),\n )\n\n def shift(self, periods: int, fill_value) -> Self:\n if fill_value is lib.no_default:\n fill_value = None\n\n return 
self.apply_with_block("shift", periods=periods, fill_value=fill_value)\n\n # --------------------------------------------------------------------\n # Consolidation: No-ops for all but BlockManager\n\n def is_consolidated(self) -> bool:\n return True\n\n def consolidate(self) -> Self:\n return self\n\n def _consolidate_inplace(self) -> None:\n return\n\n\nclass SingleDataManager(DataManager):\n @property\n def ndim(self) -> Literal[1]:\n return 1\n\n @final\n @property\n def array(self) -> ArrayLike:\n """\n Quick access to the backing array of the Block or SingleArrayManager.\n """\n # error: "SingleDataManager" has no attribute "arrays"; maybe "array"\n return self.arrays[0] # type: ignore[attr-defined]\n\n def setitem_inplace(self, indexer, value, warn: bool = True) -> None:\n """\n Set values with indexer.\n\n For Single[Block/Array]Manager, this backs s[indexer] = value\n\n This is an inplace version of `setitem()`, mutating the manager/values\n in place, not returning a new Manager (and Block), and thus never changing\n the dtype.\n """\n arr = self.array\n\n # EAs will do this validation in their own __setitem__ methods.\n if isinstance(arr, np.ndarray):\n # Note: checking for ndarray instead of np.dtype means we exclude\n # dt64/td64, which do their own validation.\n value = np_can_hold_element(arr.dtype, value)\n\n if isinstance(value, np.ndarray) and value.ndim == 1 and len(value) == 1:\n # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615\n value = value[0, ...]\n\n arr[indexer] = value\n\n def grouped_reduce(self, func):\n arr = self.array\n res = func(arr)\n index = default_index(len(res))\n\n mgr = type(self).from_array(res, index)\n return mgr\n\n @classmethod\n def from_array(cls, arr: ArrayLike, index: Index):\n raise AbstractMethodError(cls)\n\n\ndef interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None:\n """\n Find the common dtype for `blocks`.\n\n Parameters\n ----------\n blocks : List[DtypeObj]\n\n Returns\n -------\n 
dtype : np.dtype, ExtensionDtype, or None\n None is returned when `blocks` is empty.\n """\n if not len(dtypes):\n return None\n\n return find_common_type(dtypes)\n\n\ndef ensure_np_dtype(dtype: DtypeObj) -> np.dtype:\n # TODO: https://github.com/pandas-dev/pandas/issues/22791\n # Give EAs some input on what happens here. Sparse needs this.\n if isinstance(dtype, SparseDtype):\n dtype = dtype.subtype\n dtype = cast(np.dtype, dtype)\n elif isinstance(dtype, ExtensionDtype):\n dtype = np.dtype("object")\n elif dtype == np.dtype(str):\n dtype = np.dtype("object")\n return dtype\n | .venv\Lib\site-packages\pandas\core\internals\base.py | base.py | Python | 11,161 | 0.95 | 0.157248 | 0.073746 | vue-tools | 318 | 2024-08-22T09:24:17.604820 | MIT | false | f0703112322840cbb1d21c8410226038 |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n algos as libalgos,\n internals as libinternals,\n lib,\n)\nfrom pandas._libs.missing import NA\nfrom pandas.util._decorators import cache_readonly\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.cast import (\n ensure_dtype_can_hold_na,\n find_common_type,\n)\nfrom pandas.core.dtypes.common import (\n is_1d_only_ea_dtype,\n is_scalar,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.dtypes import (\n ExtensionDtype,\n SparseDtype,\n)\nfrom pandas.core.dtypes.missing import (\n is_valid_na_for_dtype,\n isna,\n isna_all,\n)\n\nfrom pandas.core.construction import ensure_wrapped_if_datetimelike\nfrom pandas.core.internals.array_manager import ArrayManager\nfrom pandas.core.internals.blocks import (\n ensure_block_shape,\n new_block_2d,\n)\nfrom pandas.core.internals.managers import (\n BlockManager,\n make_na_array,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from pandas._typing import (\n ArrayLike,\n AxisInt,\n DtypeObj,\n Manager2D,\n Shape,\n )\n\n from pandas import Index\n from pandas.core.internals.blocks import (\n Block,\n BlockPlacement,\n )\n\n\ndef _concatenate_array_managers(\n mgrs: list[ArrayManager], axes: list[Index], concat_axis: AxisInt\n) -> Manager2D:\n """\n Concatenate array managers into one.\n\n Parameters\n ----------\n mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples\n axes : list of Index\n concat_axis : int\n\n Returns\n -------\n ArrayManager\n """\n if concat_axis == 1:\n return mgrs[0].concat_vertical(mgrs, axes)\n else:\n # concatting along the columns -> combine reindexed arrays in a single manager\n assert concat_axis == 0\n return mgrs[0].concat_horizontal(mgrs, axes)\n\n\ndef concatenate_managers(\n mgrs_indexers, axes: list[Index], 
concat_axis: AxisInt, copy: bool\n) -> Manager2D:\n """\n Concatenate block managers into one.\n\n Parameters\n ----------\n mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples\n axes : list of Index\n concat_axis : int\n copy : bool\n\n Returns\n -------\n BlockManager\n """\n\n needs_copy = copy and concat_axis == 0\n\n # TODO(ArrayManager) this assumes that all managers are of the same type\n if isinstance(mgrs_indexers[0][0], ArrayManager):\n mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)\n # error: Argument 1 to "_concatenate_array_managers" has incompatible\n # type "List[BlockManager]"; expected "List[Union[ArrayManager,\n # SingleArrayManager, BlockManager, SingleBlockManager]]"\n return _concatenate_array_managers(\n mgrs, axes, concat_axis # type: ignore[arg-type]\n )\n\n # Assertions disabled for performance\n # for tup in mgrs_indexers:\n # # caller is responsible for ensuring this\n # indexers = tup[1]\n # assert concat_axis not in indexers\n\n if concat_axis == 0:\n mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)\n return mgrs[0].concat_horizontal(mgrs, axes)\n\n if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0:\n first_dtype = mgrs_indexers[0][0].blocks[0].dtype\n if first_dtype in [np.float64, np.float32]:\n # TODO: support more dtypes here. 
This will be simpler once\n # JoinUnit.is_na behavior is deprecated.\n if (\n all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)\n and len(mgrs_indexers) > 1\n ):\n # Fastpath!\n # Length restriction is just to avoid having to worry about 'copy'\n shape = tuple(len(x) for x in axes)\n nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype)\n return BlockManager((nb,), axes)\n\n mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)\n\n if len(mgrs) == 1:\n mgr = mgrs[0]\n out = mgr.copy(deep=False)\n out.axes = axes\n return out\n\n concat_plan = _get_combined_plan(mgrs)\n\n blocks = []\n values: ArrayLike\n\n for placement, join_units in concat_plan:\n unit = join_units[0]\n blk = unit.block\n\n if _is_uniform_join_units(join_units):\n vals = [ju.block.values for ju in join_units]\n\n if not blk.is_extension:\n # _is_uniform_join_units ensures a single dtype, so\n # we can use np.concatenate, which is more performant\n # than concat_compat\n # error: Argument 1 to "concatenate" has incompatible type\n # "List[Union[ndarray[Any, Any], ExtensionArray]]";\n # expected "Union[_SupportsArray[dtype[Any]],\n # _NestedSequence[_SupportsArray[dtype[Any]]]]"\n values = np.concatenate(vals, axis=1) # type: ignore[arg-type]\n elif is_1d_only_ea_dtype(blk.dtype):\n # TODO(EA2D): special-casing not needed with 2D EAs\n values = concat_compat(vals, axis=0, ea_compat_axis=True)\n values = ensure_block_shape(values, ndim=2)\n else:\n values = concat_compat(vals, axis=1)\n\n values = ensure_wrapped_if_datetimelike(values)\n\n fastpath = blk.values.dtype == values.dtype\n else:\n values = _concatenate_join_units(join_units, copy=copy)\n fastpath = False\n\n if fastpath:\n b = blk.make_block_same_class(values, placement=placement)\n else:\n b = new_block_2d(values, placement=placement)\n\n blocks.append(b)\n\n return BlockManager(tuple(blocks), axes)\n\n\ndef _maybe_reindex_columns_na_proxy(\n axes: list[Index],\n mgrs_indexers: 
list[tuple[BlockManager, dict[int, np.ndarray]]],\n needs_copy: bool,\n) -> list[BlockManager]:\n """\n Reindex along columns so that all of the BlockManagers being concatenated\n have matching columns.\n\n Columns added in this reindexing have dtype=np.void, indicating they\n should be ignored when choosing a column's final dtype.\n """\n new_mgrs = []\n\n for mgr, indexers in mgrs_indexers:\n # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this\n # is a cheap reindexing.\n for i, indexer in indexers.items():\n mgr = mgr.reindex_indexer(\n axes[i],\n indexers[i],\n axis=i,\n copy=False,\n only_slice=True, # only relevant for i==0\n allow_dups=True,\n use_na_proxy=True, # only relevant for i==0\n )\n if needs_copy and not indexers:\n mgr = mgr.copy()\n\n new_mgrs.append(mgr)\n return new_mgrs\n\n\ndef _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool:\n """\n Check if this Manager can be treated as a single ndarray.\n """\n if mgr.nblocks != 1:\n return False\n blk = mgr.blocks[0]\n if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1):\n return False\n\n return blk.dtype == first_dtype\n\n\ndef _concat_homogeneous_fastpath(\n mgrs_indexers, shape: Shape, first_dtype: np.dtype\n) -> Block:\n """\n With single-Block managers with homogeneous dtypes (that can already hold nan),\n we avoid [...]\n """\n # assumes\n # all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in in mgrs_indexers)\n\n if all(not indexers for _, indexers in mgrs_indexers):\n # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739\n arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]\n arr = np.concatenate(arrs).T\n bp = libinternals.BlockPlacement(slice(shape[0]))\n nb = new_block_2d(arr, bp)\n return nb\n\n arr = np.empty(shape, dtype=first_dtype)\n\n if first_dtype == np.float64:\n take_func = libalgos.take_2d_axis0_float64_float64\n else:\n take_func = libalgos.take_2d_axis0_float32_float32\n\n start = 0\n for 
mgr, indexers in mgrs_indexers:\n mgr_len = mgr.shape[1]\n end = start + mgr_len\n\n if 0 in indexers:\n take_func(\n mgr.blocks[0].values,\n indexers[0],\n arr[:, start:end],\n )\n else:\n # No reindexing necessary, we can copy values directly\n arr[:, start:end] = mgr.blocks[0].values\n\n start += mgr_len\n\n bp = libinternals.BlockPlacement(slice(shape[0]))\n nb = new_block_2d(arr, bp)\n return nb\n\n\ndef _get_combined_plan(\n mgrs: list[BlockManager],\n) -> list[tuple[BlockPlacement, list[JoinUnit]]]:\n plan = []\n\n max_len = mgrs[0].shape[0]\n\n blknos_list = [mgr.blknos for mgr in mgrs]\n pairs = libinternals.get_concat_blkno_indexers(blknos_list)\n for ind, (blknos, bp) in enumerate(pairs):\n # assert bp.is_slice_like\n # assert len(bp) > 0\n\n units_for_bp = []\n for k, mgr in enumerate(mgrs):\n blkno = blknos[k]\n\n nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len)\n unit = JoinUnit(nb)\n units_for_bp.append(unit)\n\n plan.append((bp, units_for_bp))\n\n return plan\n\n\ndef _get_block_for_concat_plan(\n mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int\n) -> Block:\n blk = mgr.blocks[blkno]\n # Assertions disabled for performance:\n # assert bp.is_slice_like\n # assert blkno != -1\n # assert (mgr.blknos[bp] == blkno).all()\n\n if len(bp) == len(blk.mgr_locs) and (\n blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1\n ):\n nb = blk\n else:\n ax0_blk_indexer = mgr.blklocs[bp.indexer]\n\n slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len)\n # TODO: in all extant test cases 2023-04-08 we have a slice here.\n # Will this always be the case?\n if isinstance(slc, slice):\n nb = blk.slice_block_columns(slc)\n else:\n nb = blk.take_block_columns(slc)\n\n # assert nb.shape == (len(bp), mgr.shape[1])\n return nb\n\n\nclass JoinUnit:\n def __init__(self, block: Block) -> None:\n self.block = block\n\n def __repr__(self) -> str:\n return f"{type(self).__name__}({repr(self.block)})"\n\n def _is_valid_na_for(self, 
dtype: DtypeObj) -> bool:\n """\n Check that we are all-NA of a type/dtype that is compatible with this dtype.\n Augments `self.is_na` with an additional check of the type of NA values.\n """\n if not self.is_na:\n return False\n\n blk = self.block\n if blk.dtype.kind == "V":\n return True\n\n if blk.dtype == object:\n values = blk.values\n return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))\n\n na_value = blk.fill_value\n if na_value is NaT and blk.dtype != dtype:\n # e.g. we are dt64 and other is td64\n # fill_values match but we should not cast blk.values to dtype\n # TODO: this will need updating if we ever have non-nano dt64/td64\n return False\n\n if na_value is NA and needs_i8_conversion(dtype):\n # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat\n # e.g. blk.dtype == "Int64" and dtype is td64, we dont want\n # to consider these as matching\n return False\n\n # TODO: better to use can_hold_element?\n return is_valid_na_for_dtype(na_value, dtype)\n\n @cache_readonly\n def is_na(self) -> bool:\n blk = self.block\n if blk.dtype.kind == "V":\n return True\n\n if not blk._can_hold_na:\n return False\n\n values = blk.values\n if values.size == 0:\n # GH#39122 this case will return False once deprecation is enforced\n return True\n\n if isinstance(values.dtype, SparseDtype):\n return False\n\n if values.ndim == 1:\n # TODO(EA2D): no need for special case with 2D EAs\n val = values[0]\n if not is_scalar(val) or not isna(val):\n # ideally isna_all would do this short-circuiting\n return False\n return isna_all(values)\n else:\n val = values[0][0]\n if not is_scalar(val) or not isna(val):\n # ideally isna_all would do this short-circuiting\n return False\n return all(isna_all(row) for row in values)\n\n @cache_readonly\n def is_na_after_size_and_isna_all_deprecation(self) -> bool:\n """\n Will self.is_na be True after values.size == 0 deprecation and isna_all\n deprecation are enforced?\n """\n blk = self.block\n if 
blk.dtype.kind == "V":\n return True\n return False\n\n def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:\n values: ArrayLike\n\n if upcasted_na is None and self.block.dtype.kind != "V":\n # No upcasting is necessary\n return self.block.values\n else:\n fill_value = upcasted_na\n\n if self._is_valid_na_for(empty_dtype):\n # note: always holds when self.block.dtype.kind == "V"\n blk_dtype = self.block.dtype\n\n if blk_dtype == np.dtype("object"):\n # we want to avoid filling with np.nan if we are\n # using None; we already know that we are all\n # nulls\n values = cast(np.ndarray, self.block.values)\n if values.size and values[0, 0] is None:\n fill_value = None\n\n return make_na_array(empty_dtype, self.block.shape, fill_value)\n\n return self.block.values\n\n\ndef _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:\n """\n Concatenate values from several join units along axis=1.\n """\n empty_dtype, empty_dtype_future = _get_empty_dtype(join_units)\n\n has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)\n upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)\n\n to_concat = [\n ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)\n for ju in join_units\n ]\n\n if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat):\n # TODO(EA2D): special case not needed if all EAs used HybridBlocks\n\n # error: No overload variant of "__getitem__" of "ExtensionArray" matches\n # argument type "Tuple[int, slice]"\n to_concat = [\n t\n if is_1d_only_ea_dtype(t.dtype)\n else t[0, :] # type: ignore[call-overload]\n for t in to_concat\n ]\n concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)\n concat_values = ensure_block_shape(concat_values, 2)\n\n else:\n concat_values = concat_compat(to_concat, axis=1)\n\n if empty_dtype != empty_dtype_future:\n if empty_dtype == concat_values.dtype:\n # GH#39122, GH#40893\n warnings.warn(\n "The behavior of DataFrame 
concatenation with empty or all-NA "\n "entries is deprecated. In a future version, this will no longer "\n "exclude empty or all-NA columns when determining the result dtypes. "\n "To retain the old behavior, exclude the relevant entries before "\n "the concat operation.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return concat_values\n\n\ndef _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):\n """\n Find the NA value to go with this dtype.\n """\n if isinstance(dtype, ExtensionDtype):\n return dtype.na_value\n elif dtype.kind in "mM":\n return dtype.type("NaT")\n elif dtype.kind in "fc":\n return dtype.type("NaN")\n elif dtype.kind == "b":\n # different from missing.na_value_for_dtype\n return None\n elif dtype.kind in "iu":\n if not has_none_blocks:\n # different from missing.na_value_for_dtype\n return None\n return np.nan\n elif dtype.kind == "O":\n return np.nan\n raise NotImplementedError\n\n\ndef _get_empty_dtype(join_units: Sequence[JoinUnit]) -> tuple[DtypeObj, DtypeObj]:\n """\n Return dtype and N/A values to use when concatenating specified units.\n\n Returned N/A value may be None which means there was no casting involved.\n\n Returns\n -------\n dtype\n """\n if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]):\n empty_dtype = join_units[0].block.dtype\n return empty_dtype, empty_dtype\n\n has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)\n\n dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]\n if not len(dtypes):\n dtypes = [\n unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V"\n ]\n\n dtype = find_common_type(dtypes)\n if has_none_blocks:\n dtype = ensure_dtype_can_hold_na(dtype)\n\n dtype_future = dtype\n if len(dtypes) != len(join_units):\n dtypes_future = [\n unit.block.dtype\n for unit in join_units\n if not unit.is_na_after_size_and_isna_all_deprecation\n ]\n if not len(dtypes_future):\n dtypes_future = [\n unit.block.dtype for unit in join_units 
if unit.block.dtype.kind != "V"\n ]\n\n if len(dtypes) != len(dtypes_future):\n dtype_future = find_common_type(dtypes_future)\n if has_none_blocks:\n dtype_future = ensure_dtype_can_hold_na(dtype_future)\n\n return dtype, dtype_future\n\n\ndef _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:\n """\n Check if the join units consist of blocks of uniform type that can\n be concatenated using Block.concat_same_type instead of the generic\n _concatenate_join_units (which uses `concat_compat`).\n\n """\n first = join_units[0].block\n if first.dtype.kind == "V":\n return False\n return (\n # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64\n all(type(ju.block) is type(first) for ju in join_units)\n and\n # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform\n all(\n ju.block.dtype == first.dtype\n # GH#42092 we only want the dtype_equal check for non-numeric blocks\n # (for now, may change but that would need a deprecation)\n or ju.block.dtype.kind in "iub"\n for ju in join_units\n )\n and\n # no blocks that would get missing values (can lead to type upcasts)\n # unless we're an extension dtype.\n all(not ju.is_na or ju.block.is_extension for ju in join_units)\n )\n | .venv\Lib\site-packages\pandas\core\internals\concat.py | concat.py | Python | 19,151 | 0.95 | 0.190635 | 0.132383 | awesome-app | 619 | 2024-04-14T01:52:15.448995 | GPL-3.0 | false | 35d4147d1f5606e302e90812f9e3ff6c |
"""\nFunctions for preparing various inputs passed to the DataFrame or Series\nconstructors before passing them to a BlockManager.\n"""\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nimport numpy as np\nfrom numpy import ma\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.astype import astype_is_view\nfrom pandas.core.dtypes.cast import (\n construct_1d_arraylike_from_scalar,\n dict_compat,\n maybe_cast_to_datetime,\n maybe_convert_platform,\n maybe_infer_to_datetimelike,\n)\nfrom pandas.core.dtypes.common import (\n is_1d_only_ea_dtype,\n is_integer_dtype,\n is_list_like,\n is_named_tuple,\n is_object_dtype,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\n\nfrom pandas.core import (\n algorithms,\n common as com,\n)\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.arrays.string_ import StringDtype\nfrom pandas.core.construction import (\n array as pd_array,\n ensure_wrapped_if_datetimelike,\n extract_array,\n range_to_ndarray,\n sanitize_array,\n)\nfrom pandas.core.indexes.api import (\n DatetimeIndex,\n Index,\n TimedeltaIndex,\n default_index,\n ensure_index,\n get_objs_combined_axis,\n union_indexes,\n)\nfrom pandas.core.internals.array_manager import (\n ArrayManager,\n SingleArrayManager,\n)\nfrom pandas.core.internals.blocks import (\n BlockPlacement,\n ensure_block_shape,\n new_block,\n new_block_2d,\n)\nfrom pandas.core.internals.managers import (\n BlockManager,\n SingleBlockManager,\n create_block_manager_from_blocks,\n create_block_manager_from_column_arrays,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Sequence,\n )\n\n from pandas._typing import (\n ArrayLike,\n DtypeObj,\n Manager,\n npt,\n )\n# ---------------------------------------------------------------------\n# BlockManager Interface\n\n\ndef 
arrays_to_mgr(\n arrays,\n columns: Index,\n index,\n *,\n dtype: DtypeObj | None = None,\n verify_integrity: bool = True,\n typ: str | None = None,\n consolidate: bool = True,\n) -> Manager:\n """\n Segregate Series based on type and coerce into matrices.\n\n Needs to handle a lot of exceptional cases.\n """\n if verify_integrity:\n # figure out the index, if necessary\n if index is None:\n index = _extract_index(arrays)\n else:\n index = ensure_index(index)\n\n # don't force copy because getting jammed in an ndarray anyway\n arrays, refs = _homogenize(arrays, index, dtype)\n # _homogenize ensures\n # - all(len(x) == len(index) for x in arrays)\n # - all(x.ndim == 1 for x in arrays)\n # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)\n # - all(type(x) is not NumpyExtensionArray for x in arrays)\n\n else:\n index = ensure_index(index)\n arrays = [extract_array(x, extract_numpy=True) for x in arrays]\n # with _from_arrays, the passed arrays should never be Series objects\n refs = [None] * len(arrays)\n\n # Reached via DataFrame._from_arrays; we do minimal validation here\n for arr in arrays:\n if (\n not isinstance(arr, (np.ndarray, ExtensionArray))\n or arr.ndim != 1\n or len(arr) != len(index)\n ):\n raise ValueError(\n "Arrays must be 1-dimensional np.ndarray or ExtensionArray "\n "with length matching len(index)"\n )\n\n columns = ensure_index(columns)\n if len(columns) != len(arrays):\n raise ValueError("len(arrays) must match len(columns)")\n\n # from BlockManager perspective\n axes = [columns, index]\n\n if typ == "block":\n return create_block_manager_from_column_arrays(\n arrays, axes, consolidate=consolidate, refs=refs\n )\n elif typ == "array":\n return ArrayManager(arrays, [index, columns])\n else:\n raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")\n\n\ndef rec_array_to_mgr(\n data: np.rec.recarray | np.ndarray,\n index,\n columns,\n dtype: DtypeObj | None,\n copy: bool,\n typ: str,\n) -> Manager:\n """\n 
Extract from a masked rec array and create the manager.\n """\n # essentially process a record array then fill it\n fdata = ma.getdata(data)\n if index is None:\n index = default_index(len(fdata))\n else:\n index = ensure_index(index)\n\n if columns is not None:\n columns = ensure_index(columns)\n arrays, arr_columns = to_arrays(fdata, columns)\n\n # create the manager\n\n arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))\n if columns is None:\n columns = arr_columns\n\n mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)\n\n if copy:\n mgr = mgr.copy()\n return mgr\n\n\ndef mgr_to_mgr(mgr, typ: str, copy: bool = True) -> Manager:\n """\n Convert to specific type of Manager. Does not copy if the type is already\n correct. Does not guarantee a copy otherwise. `copy` keyword only controls\n whether conversion from Block->ArrayManager copies the 1D arrays.\n """\n new_mgr: Manager\n\n if typ == "block":\n if isinstance(mgr, BlockManager):\n new_mgr = mgr\n else:\n if mgr.ndim == 2:\n new_mgr = arrays_to_mgr(\n mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block"\n )\n else:\n new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)\n elif typ == "array":\n if isinstance(mgr, ArrayManager):\n new_mgr = mgr\n else:\n if mgr.ndim == 2:\n arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]\n if copy:\n arrays = [arr.copy() for arr in arrays]\n new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])\n else:\n array = mgr.internal_values()\n if copy:\n array = array.copy()\n new_mgr = SingleArrayManager([array], [mgr.index])\n else:\n raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")\n return new_mgr\n\n\n# ---------------------------------------------------------------------\n# DataFrame Constructor Interface\n\n\ndef ndarray_to_mgr(\n values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str\n) -> Manager:\n # used in DataFrame.__init__\n # input must be a ndarray, 
list, Series, Index, ExtensionArray\n\n if isinstance(values, ABCSeries):\n if columns is None:\n if values.name is not None:\n columns = Index([values.name])\n if index is None:\n index = values.index\n else:\n values = values.reindex(index)\n\n # zero len case (GH #2234)\n if not len(values) and columns is not None and len(columns):\n values = np.empty((0, 1), dtype=object)\n\n # if the array preparation does a copy -> avoid this for ArrayManager,\n # since the copy is done on conversion to 1D arrays\n copy_on_sanitize = False if typ == "array" else copy\n\n vdtype = getattr(values, "dtype", None)\n refs = None\n if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):\n # GH#19157\n\n if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:\n # GH#12513 a EA dtype passed with a 2D array, split into\n # multiple EAs that view the values\n # error: No overload variant of "__getitem__" of "ExtensionArray"\n # matches argument type "Tuple[slice, int]"\n values = [\n values[:, n] # type: ignore[call-overload]\n for n in range(values.shape[1])\n ]\n else:\n values = [values]\n\n if columns is None:\n columns = Index(range(len(values)))\n else:\n columns = ensure_index(columns)\n\n return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)\n\n elif isinstance(vdtype, ExtensionDtype):\n # i.e. 
Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype)\n # are already caught above\n values = extract_array(values, extract_numpy=True)\n if copy:\n values = values.copy()\n if values.ndim == 1:\n values = values.reshape(-1, 1)\n\n elif isinstance(values, (ABCSeries, Index)):\n if not copy_on_sanitize and (\n dtype is None or astype_is_view(values.dtype, dtype)\n ):\n refs = values._references\n\n if copy_on_sanitize:\n values = values._values.copy()\n else:\n values = values._values\n\n values = _ensure_2d(values)\n\n elif isinstance(values, (np.ndarray, ExtensionArray)):\n # drop subclass info\n if copy_on_sanitize and (dtype is None or astype_is_view(values.dtype, dtype)):\n # only force a copy now if copy=True was requested\n # and a subsequent `astype` will not already result in a copy\n values = np.array(values, copy=True, order="F")\n else:\n values = np.asarray(values)\n values = _ensure_2d(values)\n\n else:\n # by definition an array here\n # the dtypes will be coerced to a single dtype\n values = _prep_ndarraylike(values, copy=copy_on_sanitize)\n\n if dtype is not None and values.dtype != dtype:\n # GH#40110 see similar check inside sanitize_array\n values = sanitize_array(\n values,\n None,\n dtype=dtype,\n copy=copy_on_sanitize,\n allow_2d=True,\n )\n\n # _prep_ndarraylike ensures that values.ndim == 2 at this point\n index, columns = _get_axes(\n values.shape[0], values.shape[1], index=index, columns=columns\n )\n\n _check_values_indices_shape_match(values, index, columns)\n\n if typ == "array":\n if issubclass(values.dtype.type, str):\n values = np.array(values, dtype=object)\n\n if dtype is None and is_object_dtype(values.dtype):\n arrays = [\n ensure_wrapped_if_datetimelike(\n maybe_infer_to_datetimelike(values[:, i])\n )\n for i in range(values.shape[1])\n ]\n else:\n if lib.is_np_dtype(values.dtype, "mM"):\n values = ensure_wrapped_if_datetimelike(values)\n arrays = [values[:, i] for i in range(values.shape[1])]\n\n if copy:\n arrays = 
[arr.copy() for arr in arrays]\n\n return ArrayManager(arrays, [index, columns], verify_integrity=False)\n\n values = values.T\n\n # if we don't have a dtype specified, then try to convert objects\n # on the entire block; this is to convert if we have datetimelike's\n # embedded in an object type\n if dtype is None and is_object_dtype(values.dtype):\n obj_columns = list(values)\n maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns]\n # don't convert (and copy) the objects if no type inference occurs\n if any(x is not y for x, y in zip(obj_columns, maybe_datetime)):\n dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime]\n block_values = [\n new_block_2d(dvals_list[n], placement=BlockPlacement(n))\n for n in range(len(dvals_list))\n ]\n else:\n bp = BlockPlacement(slice(len(columns)))\n nb = new_block_2d(values, placement=bp, refs=refs)\n block_values = [nb]\n elif dtype is None and values.dtype.kind == "U" and using_string_dtype():\n dtype = StringDtype(na_value=np.nan)\n\n obj_columns = list(values)\n block_values = [\n new_block(\n dtype.construct_array_type()._from_sequence(data, dtype=dtype),\n BlockPlacement(slice(i, i + 1)),\n ndim=2,\n )\n for i, data in enumerate(obj_columns)\n ]\n\n else:\n bp = BlockPlacement(slice(len(columns)))\n nb = new_block_2d(values, placement=bp, refs=refs)\n block_values = [nb]\n\n if len(columns) == 0:\n # TODO: check len(values) == 0?\n block_values = []\n\n return create_block_manager_from_blocks(\n block_values, [columns, index], verify_integrity=False\n )\n\n\ndef _check_values_indices_shape_match(\n values: np.ndarray, index: Index, columns: Index\n) -> None:\n """\n Check that the shape implied by our axes matches the actual shape of the\n data.\n """\n if values.shape[1] != len(columns) or values.shape[0] != len(index):\n # Could let this raise in Block constructor, but we get a more\n # helpful exception message this way.\n if values.shape[0] == 0 < len(index):\n raise ValueError("Empty 
data passed with indices specified.")

    passed = values.shape
    implied = (len(index), len(columns))
    raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")


def dict_to_mgr(
    data: dict,
    index,
    columns,
    *,
    dtype: DtypeObj | None = None,
    typ: str = "block",
    copy: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.

    Used in DataFrame.__init__

    Parameters
    ----------
    data : dict
        Mapping of column label -> column values (scalars, arrays, Series, ...).
    index : Index or None
        Row labels; if None and ``columns`` is given, inferred via _extract_index.
    columns : Index-like or None
        Column labels; if given, ``data`` is reindexed to them and missing
        entries are filled.
    dtype : DtypeObj, optional
        Target dtype for all columns.
    typ : {"block", "array"}, default "block"
        Which Manager implementation to build.
    copy : bool, default True
        Whether to copy the input arrays.

    Returns
    -------
    Manager
    """
    arrays: Sequence[Any] | Series

    if columns is not None:
        from pandas.core.series import Series

        # Reindexing `data` to `columns` via a Series: labels absent from
        # `data` become NaN entries in `arrays`.
        arrays = Series(data, index=columns, dtype=object)
        missing = arrays.isna()
        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = _extract_index(arrays[~missing])
        else:
            index = ensure_index(index)

        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            nan_dtype: DtypeObj

            if dtype is not None:
                # calling sanitize_array ensures we don't mix-and-match
                #  NA dtypes
                midxs = missing.values.nonzero()[0]
                for i in midxs:
                    arr = sanitize_array(arrays.iat[i], index, dtype=dtype)
                    arrays.iat[i] = arr
            else:
                # GH#1783
                nan_dtype = np.dtype("object")
                val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
                nmissing = missing.sum()
                if copy:
                    rhs = [val] * nmissing
                else:
                    # GH#45369
                    rhs = [val.copy() for _ in range(nmissing)]
                arrays.loc[missing] = rhs

        arrays = list(arrays)
        columns = ensure_index(columns)

    else:
        keys = list(data.keys())
        columns = Index(keys) if keys else default_index(0)
        arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]

    if copy:
        if typ == "block":
            # We only need to copy arrays that will not get consolidated, i.e.
            #  only EA arrays
            arrays = [
                x.copy()
                if isinstance(x, ExtensionArray)
                else x.copy(deep=True)
                if (
                    isinstance(x, Index)
                    or isinstance(x, ABCSeries)
                    and is_1d_only_ea_dtype(x.dtype)
                )
                else x
                for x in arrays
            ]
        else:
            # dtype check to exclude e.g. range objects, scalars
            arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays]

    return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)


def nested_data_to_arrays(
    data: Sequence,
    columns: Index | None,
    index: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index, Index]:
    """
    Convert a single sequence of arrays to multiple arrays.

    Returns the per-column arrays plus the resolved columns and index.
    """
    # By the time we get here we have already checked treat_as_nested(data)

    if is_named_tuple(data[0]) and columns is None:
        # Use the namedtuple's field names as column labels.
        columns = ensure_index(data[0]._fields)

    arrays, columns = to_arrays(data, columns, dtype=dtype)
    columns = ensure_index(columns)

    if index is None:
        if isinstance(data[0], ABCSeries):
            # Row labels come from the Series' names (if any have one).
            index = _get_names_from_index(data)
        else:
            index = default_index(len(data))

    return arrays, columns, index


def treat_as_nested(data) -> bool:
    """
    Check if we should use nested_data_to_arrays.
    """
    return (
        len(data) > 0
        and is_list_like(data[0])
        and getattr(data[0], "ndim", 1) == 1
        and not (isinstance(data, ExtensionArray) and data.ndim == 2)
    )


# ---------------------------------------------------------------------


def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray:
    """Coerce a non-ndarray list-like to a 2D object-preserving ndarray."""
    # values is specifically _not_ ndarray, EA, Index, or Series
    # We only get here with `not treat_as_nested(values)`

    if len(values) == 0:
        # TODO: check for length-zero range, in which case return int64 dtype?
        # TODO: reuse anything in try_cast?
        return np.empty((0, 0), dtype=object)
    elif isinstance(values, range):
        arr = range_to_ndarray(values)
        return arr[..., np.newaxis]

    def convert(v):
        # Coerce one row/element, preserving platform int/float dtypes.
        if not is_list_like(v) or isinstance(v, ABCDataFrame):
            return v

        v = extract_array(v, extract_numpy=True)
        res = maybe_convert_platform(v)
        # We don't do maybe_infer_to_datetimelike here bc we will end up doing
        #  it column-by-column in ndarray_to_mgr
        return res

    # we could have a 1-dim or 2-dim list here
    # this is equiv of np.asarray, but does object conversion
    # and platform dtype preservation
    # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like
    #  np.asarray would
    if is_list_like(values[0]):
        values = np.array([convert(v) for v in values])
    elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
        # GH#21861 see test_constructor_list_of_lists
        values = np.array([convert(v) for v in values])
    else:
        values = convert(values)

    return _ensure_2d(values)


def _ensure_2d(values: np.ndarray) -> np.ndarray:
    """
    Reshape 1D values, raise on anything else other than 2D.
    """
    if values.ndim == 1:
        # column-vector shape (n, 1)
        values = values.reshape((values.shape[0], 1))
    elif values.ndim != 2:
        raise ValueError(f"Must pass 2-d input. shape={values.shape}")
    return values


def _homogenize(
    data, index: Index, dtype: DtypeObj | None
) -> tuple[list[ArrayLike], list[Any]]:
    """
    Align each column in `data` to `index` and coerce to `dtype`.

    Returns the aligned arrays plus, per column, either the source Series'
    reference-tracking object (for Copy-on-Write) or None.
    """
    oindex = None
    homogenized = []
    # if the original array-like in `data` is a Series, keep track of this Series' refs
    refs: list[Any] = []

    for val in data:
        if isinstance(val, (ABCSeries, Index)):
            if dtype is not None:
                val = val.astype(dtype, copy=False)
            if isinstance(val, ABCSeries) and val.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                val = val.reindex(index, copy=False)
            refs.append(val._references)
            val = val._values
        else:
            if isinstance(val, dict):
                # GH#41785 this _should_ be equivalent to (but faster than)
                #  val = Series(val, index=index)._values
                if oindex is None:
                    # object view of the index, computed lazily once
                    oindex = index.astype("O")

                if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
                    # see test_constructor_dict_datetime64_index
                    val = dict_compat(val)
                else:
                    # see test_constructor_subclass_dict
                    val = dict(val)
                val = lib.fast_multiget(val, oindex._values, default=np.nan)

            val = sanitize_array(val, index, dtype=dtype, copy=False)
            com.require_length_match(val, index)
            refs.append(None)

        homogenized.append(val)

    return homogenized, refs


def _extract_index(data) -> Index:
    """
    Try to infer an Index from the passed data, raise ValueError on failure.
    """
    index: Index
    if len(data) == 0:
        return default_index(0)

    raw_lengths = []
    indexes: list[list[Hashable] | Index] = []

    have_raw_arrays = False
    have_series = False
    have_dicts = False

    for val in data:
        if isinstance(val, ABCSeries):
            have_series = True
            indexes.append(val.index)
        elif isinstance(val, dict):
            have_dicts = True
            indexes.append(list(val.keys()))
        elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
            have_raw_arrays = True
            raw_lengths.append(len(val))
        elif isinstance(val, np.ndarray) and val.ndim > 1:
            raise ValueError("Per-column arrays must each be 1-dimensional")

    if not indexes and not raw_lengths:
        raise ValueError("If using all scalar values, you must pass an index")

    if have_series:
        # Series indexes are unioned (and may be sorted by union_indexes).
        index = union_indexes(indexes)
    elif have_dicts:
        # dict keys keep insertion order (sort=False)
        index = union_indexes(indexes, sort=False)

    if have_raw_arrays:
        lengths = list(set(raw_lengths))
        if len(lengths) > 1:
            raise ValueError("All arrays must be of the same length")

        if have_dicts:
            raise ValueError(
                "Mixing dicts with non-Series may lead to ambiguous ordering."
            )

        if have_series:
            if lengths[0] != len(index):
                msg = (
                    f"array length {lengths[0]} does not match index "
                    f"length {len(index)}"
                )
                raise ValueError(msg)
        else:
            index = default_index(lengths[0])

    return ensure_index(index)


def reorder_arrays(
    arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
) -> tuple[list[ArrayLike], Index]:
    """
    Pre-emptively (cheaply) reindex arrays with new columns.
    """
    # reorder according to the columns
    if columns is not None:
        if not columns.equals(arr_columns):
            # if they are equal, there is nothing to do
            new_arrays: list[ArrayLike] = []
            indexer = arr_columns.get_indexer(columns)
            for i, k in enumerate(indexer):
                if k == -1:
                    # by convention default is all-NaN object dtype
                    arr = np.empty(length, dtype=object)
                    arr.fill(np.nan)
                else:
                    arr = arrays[k]
                new_arrays.append(arr)

            arrays = new_arrays
            arr_columns = columns

    return arrays, arr_columns


def _get_names_from_index(data) -> Index:
    """
    Build a row Index from the ``name`` attributes of a list of Series,
    filling unnamed entries with "Unnamed {count}".
    """
    has_some_name = any(getattr(s, "name", None) is not None for s in data)
    if not has_some_name:
        return default_index(len(data))

    index: list[Hashable] = list(range(len(data)))
    count = 0
    for i, s in enumerate(data):
        n = getattr(s, "name", None)
        if n is not None:
            index[i] = n
        else:
            index[i] = f"Unnamed {count}"
            count += 1

    return Index(index)


def _get_axes(
    N: int, K: int, index: Index | None, columns: Index | None
) -> tuple[Index, Index]:
    """Return (index, columns), defaulting to RangeIndexes of length N / K."""
    # helper to create the axes as indexes
    # return axes or defaults

    if index is None:
        index = default_index(N)
    else:
        index = ensure_index(index)

    if columns is None:
        columns = default_index(K)
    else:
        columns = ensure_index(columns)
    return index, columns


def dataclasses_to_dicts(data):
    """
    Converts a list of dataclass instances to a list of dictionaries.

    Parameters
    ----------
    data : List[Type[dataclass]]

    Returns
    --------
    list_dict : List[dict]

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int

    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]

    """
    from dataclasses import asdict

    return list(map(asdict, data))


# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays


def to_arrays(
    data, columns: Index | None, dtype: DtypeObj | None = None
) -> tuple[list[ArrayLike], Index]:
    """
    Return list of arrays, columns.

    Returns
    -------
    list[ArrayLike]
        These will become columns in a DataFrame.
    Index
        This will become frame.columns.

    Notes
    -----
    Ensures that len(result_arrays) == len(result_index).
    """

    if not len(data):
        if isinstance(data, np.ndarray):
            if data.dtype.names is not None:
                # i.e. numpy structured array
                columns = ensure_index(data.dtype.names)
                arrays = [data[name] for name in columns]

                if len(data) == 0:
                    # GH#42456 the indexing above results in list of 2D ndarrays
                    # TODO: is that an issue with numpy?
                    for i, arr in enumerate(arrays):
                        if arr.ndim == 2:
                            arrays[i] = arr[:, 0]

                return arrays, columns
        return [], ensure_index([])

    elif isinstance(data, np.ndarray) and data.dtype.names is not None:
        # e.g. recarray
        columns = Index(list(data.dtype.names))
        arrays = [data[k] for k in columns]
        return arrays, columns

    # Dispatch on the type of the first record.
    if isinstance(data[0], (list, tuple)):
        arr = _list_to_arrays(data)
    elif isinstance(data[0], abc.Mapping):
        arr, columns = _list_of_dict_to_arrays(data, columns)
    elif isinstance(data[0], ABCSeries):
        arr, columns = _list_of_series_to_arrays(data, columns)
    else:
        # last ditch effort
        data = [tuple(x) for x in data]
        arr = _list_to_arrays(data)

    content, columns = _finalize_columns_and_data(arr, columns, dtype)
    return content, columns


def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
    """Convert a list of rows (tuples or lists) to a 2D object ndarray."""
    # Returned np.ndarray has ndim = 2
    # Note: we already check len(data) > 0 before getting hre
    if isinstance(data[0], tuple):
        content = lib.to_object_array_tuples(data)
    else:
        # list of lists
        content = lib.to_object_array(data)
    return content


def _list_of_series_to_arrays(
    data: list,
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """Stack a list of Series into a 2D array aligned to a common column axis."""
    # returned np.ndarray has ndim == 2

    if columns is None:
        # We know pass_data is non-empty because data[0] is a Series
        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
        columns = get_objs_combined_axis(pass_data, sort=False)

    # cache indexers keyed on id(index) so repeated identical indexes
    # are aligned only once
    indexer_cache: dict[int, np.ndarray] = {}

    aligned_values = []
    for s in data:
        index = getattr(s, "index", None)
        if index is None:
            index = default_index(len(s))

        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)

        values = extract_array(s, extract_numpy=True)
        aligned_values.append(algorithms.take_nd(values, indexer))

    content = np.vstack(aligned_values)
    return content, columns


def _list_of_dict_to_arrays(
    data: list[dict],
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """
    Convert list of dicts to numpy arrays

    if `columns` is not passed, column names are inferred from the records
    - for OrderedDict and dicts, the column names match
      the key insertion-order from the first record to the last.
    - For other kinds of dict-likes, the keys are lexically sorted.

    Parameters
    ----------
    data : iterable
        collection of records (OrderedDict, dict)
    columns: iterables or None

    Returns
    -------
    content : np.ndarray[object, ndim=2]
    columns : Index
    """
    if columns is None:
        gen = (list(x.keys()) for x in data)
        sort = not any(isinstance(d, dict) for d in data)
        pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
        columns = ensure_index(pre_cols)

    # assure that they are of the base dict class and not of derived
    # classes
    data = [d if type(d) is dict else dict(d) for d in data]  # noqa: E721

    content = lib.dicts_to_array(data, list(columns))
    return content, columns


def _finalize_columns_and_data(
    content: np.ndarray,  # ndim == 2
    columns: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index]:
    """
    Ensure we have valid columns, cast object dtypes if possible.
    """
    # transpose rows -> per-column 1D arrays
    contents = list(content.T)

    try:
        columns = _validate_or_indexify_columns(contents, columns)
    except AssertionError as err:
        # GH#26429 do not raise user-facing AssertionError
        raise ValueError(err) from err

    if len(contents) and contents[0].dtype == np.object_:
        contents = convert_object_array(contents, dtype=dtype)

    return contents, columns


def _validate_or_indexify_columns(
    content: list[np.ndarray], columns: Index | None
) -> Index:
    """
    If columns is None, make numbers as column names; Otherwise, validate that
    columns have valid length.

    Parameters
    ----------
    content : list of np.ndarrays
    columns : Index or None

    Returns
    -------
    Index
        If columns is None, assign positional column index value as columns.

    Raises
    ------
    1. AssertionError when content is not composed of list of lists, and if
        length of columns is not equal to length of content.
    2. ValueError when content is list of lists, but length of each sub-list
        is not equal
    3. ValueError when content is list of lists, but length of sub-list is
        not equal to length of content
    """
    if columns is None:
        columns = default_index(len(content))
    else:
        # Add mask for data which is composed of list of lists
        is_mi_list = isinstance(columns, list) and all(
            isinstance(col, list) for col in columns
        )

        if not is_mi_list and len(columns) != len(content):  # pragma: no cover
            # caller's responsibility to check for this...
            raise AssertionError(
                f"{len(columns)} columns passed, passed data had "
                f"{len(content)} columns"
            )
        if is_mi_list:
            # check if nested list column, length of each sub-list should be equal
            if len({len(col) for col in columns}) > 1:
                raise ValueError(
                    "Length of columns passed for MultiIndex columns is different"
                )

            # if columns is not empty and length of sublist is not equal to content
            if columns and len(columns[0]) != len(content):
                raise ValueError(
                    f"{len(columns[0])} columns passed, passed data had "
                    f"{len(content)} columns"
                )
    return columns


def convert_object_array(
    content: list[npt.NDArray[np.object_]],
    dtype: DtypeObj | None,
    dtype_backend: str = "numpy",
    coerce_float: bool = False,
) -> list[ArrayLike]:
    """
    Internal function to convert object array.

    Parameters
    ----------
    content: List[np.ndarray]
    dtype: np.dtype or ExtensionDtype
    dtype_backend: Controls if nullable/pyarrow dtypes are returned.
    coerce_float: Cast floats that are integers to int.

    Returns
    -------
    List[ArrayLike]
    """
    # provide soft conversion of object dtypes

    def convert(arr):
        if dtype != np.dtype("O"):
            arr = lib.maybe_convert_objects(
                arr,
                try_float=coerce_float,
                convert_to_nullable_dtype=dtype_backend != "numpy",
            )
            # Notes on cases that get here 2023-02-15
            # 1) we DO get here when arr is all Timestamps and dtype=None
            # 2) disabling this doesn't break the world, so this must be
            #    getting caught at a higher level
            # 3) passing convert_non_numeric to maybe_convert_objects get this right
            # 4) convert_non_numeric?

        if dtype is None:
            if arr.dtype == np.dtype("O"):
                # i.e. maybe_convert_objects didn't convert
                convert_to_nullable_dtype = dtype_backend != "numpy"
                arr = maybe_infer_to_datetimelike(arr, convert_to_nullable_dtype)
                if convert_to_nullable_dtype and arr.dtype == np.dtype("O"):
                    new_dtype = StringDtype()
                    arr_cls = new_dtype.construct_array_type()
                    arr = arr_cls._from_sequence(arr, dtype=new_dtype)
            elif dtype_backend != "numpy" and isinstance(arr, np.ndarray):
                if arr.dtype.kind in "iufb":
                    arr = pd_array(arr, copy=False)

        elif isinstance(dtype, ExtensionDtype):
            # TODO: test(s) that get here
            # TODO: try to de-duplicate this convert function with
            #  core.construction functions
            cls = dtype.construct_array_type()
            arr = cls._from_sequence(arr, dtype=dtype, copy=False)
        elif dtype.kind in "mM":
            # This restriction is harmless bc these are the only cases
            #  where maybe_cast_to_datetime is not a no-op.
            #  Here we know:
            #  1) dtype.kind in "mM" and
            #  2) arr is either object or numeric dtype
            arr = maybe_cast_to_datetime(arr, dtype)

        return arr

    arrays = [convert(arr) for arr in content]

    return arrays
from __future__ import annotations

from collections.abc import (
    Hashable,
    Sequence,
)
import itertools
from typing import (
    TYPE_CHECKING,
    Callable,
    Literal,
    cast,
)
import warnings

import numpy as np

from pandas._config import (
    using_copy_on_write,
    warn_copy_on_write,
)

from pandas._libs import (
    internals as libinternals,
    lib,
)
from pandas._libs.internals import (
    BlockPlacement,
    BlockValuesRefs,
)
from pandas._libs.tslibs import Timestamp
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import infer_dtype_from_scalar
from pandas.core.dtypes.common import (
    ensure_platform_int,
    is_1d_only_ea_dtype,
    is_list_like,
)
from pandas.core.dtypes.dtypes import (
    DatetimeTZDtype,
    ExtensionDtype,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCSeries,
)
from pandas.core.dtypes.missing import (
    array_equals,
    isna,
)

import pandas.core.algorithms as algos
from pandas.core.arrays import (
    ArrowExtensionArray,
    ArrowStringArray,
    DatetimeArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import (
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import (
    Index,
    ensure_index,
)
from pandas.core.internals.base import (
    DataManager,
    SingleDataManager,
    ensure_np_dtype,
    interleaved_dtype,
)
from pandas.core.internals.blocks import (
    COW_WARNING_GENERAL_MSG,
    COW_WARNING_SETITEM_MSG,
    Block,
    NumpyBlock,
    ensure_block_shape,
    extend_blocks,
    get_block_type,
    maybe_coerce_values,
    new_block,
    new_block_2d,
)
from pandas.core.internals.ops import (
    blockwise_all,
    operate_blockwise,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ArrayLike,
        AxisInt,
        DtypeObj,
        QuantileInterpolation,
        Self,
        Shape,
        npt,
    )

    from pandas.api.extensions import ExtensionArray


class BaseBlockManager(DataManager):
    """
    Core internal data structure to implement DataFrame, Series, etc.

    Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
    lightweight blocked set of labeled data to be manipulated by the DataFrame
    public API class

    Attributes
    ----------
    shape
    ndim
    axes
    values
    items

    Methods
    -------
    set_axis(axis, new_labels)
    copy(deep=True)

    get_dtypes

    apply(func, axes, block_filter_fn)

    get_bool_data
    get_numeric_data

    get_slice(slice_like, axis)
    get(label)
    iget(loc)

    take(indexer, axis)
    reindex_axis(new_labels, axis)
    reindex_indexer(new_labels, indexer, axis)

    delete(label)
    insert(loc, label, value)
    set(label, value)

    Parameters
    ----------
    blocks: Sequence of Block
    axes: Sequence of Index
    verify_integrity: bool, default True

    Notes
    -----
    This is *not* a public API class
    """

    __slots__ = ()

    # lazily-built maps from column position to (block number, position
    # within block); see the `blknos` property
    _blknos: npt.NDArray[np.intp]
    _blklocs: npt.NDArray[np.intp]
    blocks: tuple[Block, ...]
    axes: list[Index]

    @property
    def ndim(self) -> int:
        # abstract: concrete subclasses define dimensionality
        raise NotImplementedError

    _known_consolidated: bool
    _is_consolidated: bool

    def __init__(self, blocks, axes, verify_integrity: bool = True) -> None:
        # abstract: subclasses implement construction
        raise NotImplementedError

    @classmethod
    def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:
        # abstract: alternate constructor implemented by subclasses
        raise NotImplementedError

    @property
    def blknos(self) -> npt.NDArray[np.intp]:
        """
        Suppose we want to find the array corresponding to our i'th column.

        blknos[i] identifies the block from self.blocks that contains this column.

        blklocs[i] identifies the column of interest within
        self.blocks[self.blknos[i]]
        """
        if self._blknos is None:
            # Note: these can be altered by other BlockManager methods.
            self._rebuild_blknos_and_blklocs()

        return self._blknos

    @property
    def blklocs(self) -> npt.NDArray[np.intp]:
        """
        See blknos.__doc__
        """
        if self._blklocs is None:
            # Note: these can be altered by other BlockManager methods.
            self._rebuild_blknos_and_blklocs()

        return self._blklocs

    def make_empty(self, axes=None) -> Self:
        """return an empty BlockManager with the items axis of len 0"""
        if axes is None:
            axes = [Index([])] + self.axes[1:]

        # preserve dtype if possible
        if self.ndim == 1:
            assert isinstance(self, SingleBlockManager)  # for mypy
            blk = self.blocks[0]
            arr = blk.values[:0]
            bp = BlockPlacement(slice(0, 0))
            nb = blk.make_block_same_class(arr, placement=bp)
            blocks = [nb]
        else:
            blocks = []
        return type(self).from_blocks(blocks, axes)

    def __nonzero__(self) -> bool:
        # a manager is always truthy, even when empty
        return True

    # Python3 compat
    __bool__ = __nonzero__

    def _normalize_axis(self, axis: AxisInt) -> int:
        # switch axis to follow BlockManager logic
        if self.ndim == 2:
            axis = 1 if axis == 0 else 0
        return axis

    def set_axis(self, axis: AxisInt, new_labels: Index) -> None:
        # Caller is responsible for ensuring we have an Index object.
        self._validate_set_axis(axis, new_labels)
        self.axes[axis] = new_labels

    @property
    def is_single_block(self) -> bool:
        # Assumes we are 2D; overridden by SingleBlockManager
        return len(self.blocks) == 1

    @property
    def items(self) -> Index:
        # column labels (axis 0 in manager convention)
        return self.axes[0]

    def _has_no_reference(self, i: int) -> bool:
        """
        Check for column `i` if it has references.
        (whether it references another array or is itself being referenced)
        Returns True if the column has no references.
        """
        blkno = self.blknos[i]
        return self._has_no_reference_block(blkno)

    def _has_no_reference_block(self, blkno: int) -> bool:
        """
        Check for block `i` if it has references.
        (whether it references another array or is itself being referenced)
        Returns True if the block has no references.
        """
        return not self.blocks[blkno].refs.has_reference()

    def add_references(self, mgr: BaseBlockManager) -> None:
        """
        Adds the references from one manager to another. We assume that both
        managers have the same block structure.
        """
        if len(self.blocks) != len(mgr.blocks):
            # If block structure changes, then we made a copy
            return
        for i, blk in enumerate(self.blocks):
            blk.refs = mgr.blocks[i].refs
            blk.refs.add_reference(blk)

    def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool:
        """
        Checks if two blocks from two different block managers reference the
        same underlying values.
        """
        blk = self.blocks[blkno]
        # refs hold weakrefs; compare identity of the dereferenced blocks
        return any(blk is ref() for ref in mgr.blocks[blkno].refs.referenced_blocks)

    def get_dtypes(self) -> npt.NDArray[np.object_]:
        # per-column dtypes, in column order (via blknos)
        dtypes = np.array([blk.dtype for blk in self.blocks], dtype=object)
        return dtypes.take(self.blknos)

    @property
    def arrays(self) -> list[ArrayLike]:
        """
        Quick access to the backing arrays of the Blocks.

        Only for compatibility with ArrayManager for testing convenience.
        Not to be used in actual code, and return value is not the same as the
        ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs).

        Warning! The returned arrays don't handle Copy-on-Write, so this should
        be used with caution (only in read-mode).
        """
        return [blk.values for blk in self.blocks]

    def __repr__(self) -> str:
        output = type(self).__name__
        for i, ax in enumerate(self.axes):
            if i == 0:
                output += f"\nItems: {ax}"
            else:
                output += f"\nAxis {i}: {ax}"

        for block in self.blocks:
            output += f"\n{block}"
        return output

    def apply(
        self,
        f,
        align_keys: list[str] | None = None,
        **kwargs,
    ) -> Self:
        """
        Iterate over the blocks, collect and create a new BlockManager.

        Parameters
        ----------
        f : str or callable
            Name of the Block method to apply.
        align_keys: List[str] or None, default None
        **kwargs
            Keywords to pass to `f`

        Returns
        -------
        BlockManager
        """
        assert "filter" not in kwargs

        align_keys = align_keys or []
        result_blocks: list[Block] = []
        # fillna: Series/DataFrame is responsible for making sure value is aligned

        aligned_args = {k: kwargs[k] for k in align_keys}

        for b in self.blocks:
            if aligned_args:
                # slice each aligned argument down to this block's columns
                for k, obj in aligned_args.items():
                    if isinstance(obj, (ABCSeries, ABCDataFrame)):
                        # The caller is responsible for ensuring that
                        #  obj.axes[-1].equals(self.items)
                        if obj.ndim == 1:
                            kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values
                        else:
                            kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values
                    else:
                        # otherwise we have an ndarray
                        kwargs[k] = obj[b.mgr_locs.indexer]

            if callable(f):
                applied = b.apply(f, **kwargs)
            else:
                applied = getattr(b, f)(**kwargs)
            result_blocks = extend_blocks(applied, result_blocks)

        out = type(self).from_blocks(result_blocks, self.axes)
        return out

    # Alias so we can share code with ArrayManager
    apply_with_block = apply

    def setitem(self, indexer, value, warn: bool = True) -> Self:
        """
        Set values with indexer.

        For SingleBlockManager, this backs s[indexer] = value
        """
        if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
            raise ValueError(f"Cannot set values with ndim > {self.ndim}")

        if warn and warn_copy_on_write() and not self._has_no_reference(0):
            warnings.warn(
                COW_WARNING_GENERAL_MSG,
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        elif using_copy_on_write() and not self._has_no_reference(0):
            # this method is only called if there is a single block -> hardcoded 0
            # Split blocks to only copy the columns we want to modify
            if self.ndim == 2 and isinstance(indexer, tuple):
                blk_loc = self.blklocs[indexer[1]]
                if is_list_like(blk_loc) and blk_loc.ndim == 2:
                    blk_loc = np.squeeze(blk_loc, axis=0)
                elif not is_list_like(blk_loc):
                    # Keep dimension and copy data later
                    blk_loc = [blk_loc]  # type: ignore[assignment]
                if len(blk_loc) == 0:
                    return self.copy(deep=False)

                values = self.blocks[0].values
                if values.ndim == 2:
                    values = values[blk_loc]
                # "T" has no attribute "_iset_split_block"
                self._iset_split_block(  # type: ignore[attr-defined]
                    0, blk_loc, values
                )
                # first block equals values
                self.blocks[0].setitem((indexer[0], np.arange(len(blk_loc))), value)
                return self
            # No need to split if we either set all columns or on a single block
            # manager
            self = self.copy()

        return self.apply("setitem", indexer=indexer, value=value)

    def diff(self, n: int) -> Self:
        # only reached with self.ndim == 2
        return self.apply("diff", n=n)

    def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self:
        # copy=None resolves to False under Copy-on-Write, True otherwise
        if copy is None:
            if using_copy_on_write():
                copy = False
            else:
                copy = True
        elif using_copy_on_write():
            copy = False

        return self.apply(
            "astype",
            dtype=dtype,
            copy=copy,
            errors=errors,
            using_cow=using_copy_on_write(),
        )

    def convert(self, copy: bool | None) -> Self:
        # same copy resolution as `astype`
        if copy is None:
            if using_copy_on_write():
                copy = False
            else:
                copy = True
        elif using_copy_on_write():
            copy = False

        return self.apply("convert", copy=copy, using_cow=using_copy_on_write())

    def convert_dtypes(self, **kwargs):
        if using_copy_on_write():
            copy = False
        else:
            copy = True

        return self.apply(
            "convert_dtypes", copy=copy, using_cow=using_copy_on_write(), **kwargs
        )

    def get_values_for_csv(
        self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None
    ) -> Self:
        """
        Convert values to native types (strings / python objects) that are used
        in formatting (repr / csv).
        """
        return self.apply(
            "get_values_for_csv",
            na_rep=na_rep,
            quoting=quoting,
            float_format=float_format,
            date_format=date_format,
            decimal=decimal,
        )

    @property
    def any_extension_types(self) -> bool:
        """Whether any of the blocks in this manager are extension blocks"""
        return any(block.is_extension for block in self.blocks)

    @property
    def is_view(self) -> bool:
        """return a boolean if we are a single block and are a view"""
        if len(self.blocks) == 1:
            return self.blocks[0].is_view

        # It is technically possible to figure out which blocks are views
        # e.g. [ b.values.base is not None for b in self.blocks ]
        # but then we have the case of possibly some blocks being a view
        # and some blocks not. setting in theory is possible on the non-view
        # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
        # complicated

        return False

    def _get_data_subset(self, predicate: Callable) -> Self:
        # keep only blocks whose values satisfy `predicate`
        blocks = [blk for blk in self.blocks if predicate(blk.values)]
        return self._combine(blocks)

    def get_bool_data(self) -> Self:
        """
        Select blocks that are bool-dtype and columns from object-dtype blocks
        that are all-bool.
        """

        new_blocks = []

        for blk in self.blocks:
            if blk.dtype == bool:
                new_blocks.append(blk)

            elif blk.is_object:
                # split object block column-by-column and keep all-bool columns
                nbs = blk._split()
                new_blocks.extend(nb for nb in nbs if nb.is_bool)

        return self._combine(new_blocks)

    def get_numeric_data(self) -> Self:
        numeric_blocks = [blk for blk in self.blocks if blk.is_numeric]
        if len(numeric_blocks) == len(self.blocks):
            # Avoid somewhat expensive _combine
            return self
        return self._combine(numeric_blocks)

    def _combine(self, blocks: list[Block], index: Index | None = None) -> Self:
        """return a new manager with the blocks"""
        if len(blocks) == 0:
            if self.ndim == 2:
                # retain our own Index dtype
                if index is not None:
                    axes = [self.items[:0], index]
                else:
                    axes = [self.items[:0]] + self.axes[1:]
                return self.make_empty(axes)
            return self.make_empty()

        # FIXME: optimization potential
        indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
        inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])

        new_blocks: list[Block] = []
        for b in blocks:
            nb = b.copy(deep=False)
            # remap block placements into the combined (compacted) column space
            nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer])
            new_blocks.append(nb)

        axes = list(self.axes)
        if index is not None:
            axes[-1] = index
        axes[0] = self.items.take(indexer)

        return type(self).from_blocks(new_blocks, axes)

    @property
    def nblocks(self) -> int:
        return len(self.blocks)

    def copy(self, deep: bool | None | Literal["all"] = True) -> Self:
        """
        Make deep or shallow copy of BlockManager

        Parameters
        ----------
        deep : bool, string or None, default True
            If False or None, return a shallow copy (do not copy data)
            If 'all', copy data and a deep copy of the index

        Returns
        -------
        BlockManager
        """
        if deep is None:
            if using_copy_on_write():
                # use shallow copy
                deep = False
            else:
                # preserve deep copy for BlockManager with copy=None
                deep = True

        # this preserves the notion of view copying of axes
        if deep:
            # hit in e.g. tests.io.json.test_pandas

            def copy_func(ax):
                return ax.copy(deep=True) if deep == "all" else ax.view()

            new_axes = [copy_func(ax) for ax in self.axes]
        else:
            if using_copy_on_write():
                new_axes = [ax.view() for ax in self.axes]
            else:
                new_axes = list(self.axes)

        res = self.apply("copy", deep=deep)
        res.axes = new_axes

        if self.ndim > 1:
            # Avoid needing to re-compute these
            blknos = self._blknos
            if blknos is not None:
                res._blknos = blknos.copy()
                res._blklocs = self._blklocs.copy()

        if deep:
            res._consolidate_inplace()
        return res

    def consolidate(self) -> Self:
        """
        Join together blocks having same dtype

        Returns
        -------
        y : BlockManager
        """
        if self.is_consolidated():
            return self

        bm = type(self)(self.blocks, self.axes, verify_integrity=False)
        bm._is_consolidated = False
        bm._consolidate_inplace()
        return bm

    def reindex_indexer(
        self,
        new_axis: Index,
        indexer: npt.NDArray[np.intp] | None,
        axis: AxisInt,
        fill_value=None,
        allow_dups: bool = False,
        copy: bool | None = True,
        only_slice: bool = False,
        *,
        use_na_proxy: bool = False,
    ) -> Self:
        """
        Parameters
        ----------
        new_axis : Index
        indexer : ndarray[intp] or None
        axis : int
        fill_value : object, default None
        allow_dups : bool, default False
        copy : bool or None, default True
            If None, regard as False to get shallow copy.
        only_slice : bool, default False
            Whether to take views, not copies, along columns.
        use_na_proxy : bool, default False
            Whether to use a np.void ndarray for newly introduced columns.

        pandas-indexer with -1's only.
        """
        if copy is None:
            if using_copy_on_write():
                # use shallow copy
                copy = False
            else:
                # preserve deep copy for BlockManager with copy=None
                copy = True

        if indexer is None:
            # identity reindex: only the axis labels change
            if new_axis is self.axes[axis] and not copy:
                return self

            result = self.copy(deep=copy)
            result.axes = list(self.axes)
            result.axes[axis] = new_axis
            return result

        # Should be intp, but in some cases we get int64 on 32bit builds
        assert isinstance(indexer, np.ndarray)

        # some axes don't allow reindexing with dups
        if not allow_dups:
            self.axes[axis]._validate_can_reindex(indexer)

        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")

        if axis == 0:
            new_blocks = self._slice_take_blocks_ax0(
                indexer,
                fill_value=fill_value,
                only_slice=only_slice,
                use_na_proxy=use_na_proxy,
            )
        else:
            new_blocks = [
                blk.take_nd(
                    indexer,
                    axis=1,
                    fill_value=(
                        fill_value if fill_value is not None else blk.fill_value
                    ),
                )
                for blk in self.blocks
            ]

        new_axes = list(self.axes)
        new_axes[axis] = new_axis

        new_mgr = type(self).from_blocks(new_blocks, new_axes)
        if axis == 1:
            # We can avoid the need to rebuild these
            new_mgr._blknos = self.blknos.copy()
            new_mgr._blklocs = self.blklocs.copy()
        return new_mgr

    def _slice_take_blocks_ax0(
        self,
        slice_or_indexer: slice | np.ndarray,
        fill_value=lib.no_default,
        only_slice: bool = False,
        *,
        use_na_proxy: bool = False,
        ref_inplace_op: bool = False,
    ) -> list[Block]:
        """
        Slice/take blocks along axis=0.

        Overloaded for SingleBlock

        Parameters
        ----------
        slice_or_indexer : slice or np.ndarray[int64]
        fill_value : scalar, default lib.no_default
        only_slice : bool, default False
            If True, we always return views on existing arrays, never copies.
            This is used when called from ops.blockwise.operate_blockwise.
        use_na_proxy : bool, default False
            Whether to use a np.void ndarray for newly introduced columns.
        ref_inplace_op: bool, default False
            Don't track refs if True because we operate inplace

        Returns
        -------
        new_blocks : list of Block
        """
        allow_fill = fill_value is not lib.no_default

        sl_type, slobj, sllen = _preprocess_slice_or_indexer(
            slice_or_indexer, self.shape[0], allow_fill=allow_fill
        )

        if self.is_single_block:
            blk = self.blocks[0]

            if sl_type == "slice":
                # GH#32959 EABlock would fail since we can't make 0-width
                # TODO(EA2D): special casing unnecessary with 2D EAs
                if sllen == 0:
                    return []
                bp = BlockPlacement(slice(0, sllen))
                return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]
            elif not allow_fill or self.ndim == 1:
                if allow_fill and fill_value is None:
                    fill_value = blk.fill_value

                if not allow_fill and only_slice:
                    # GH#33597 slice instead of take, so we get
                    #  views instead of copies
                    blocks = [
                        blk.getitem_block_columns(
                            slice(ml, ml + 1),
                            new_mgr_locs=BlockPlacement(i),
                            ref_inplace_op=ref_inplace_op,
                        )
                        for i, ml in enumerate(slobj)
                    ]
                    return blocks
                else:
                    bp = BlockPlacement(slice(0, sllen))
                    return [
                        blk.take_nd(
                            slobj,
                            axis=0,
                            new_mgr_locs=bp,
                            fill_value=fill_value,
                        )
                    ]

        if sl_type == "slice":
            blknos = self.blknos[slobj]
            blklocs = self.blklocs[slobj]
        else:
            blknos = algos.take_nd(
                self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
            )
            blklocs = algos.take_nd(
                self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
            )

        # When filling blknos, make sure blknos is updated before appending to
        # blocks list, that way new blkno is exactly len(blocks).
        blocks = []
        group = not only_slice
        for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
            if blkno == -1:
                # If we've got here, fill_value was not lib.no_default

                blocks.append(
                    self._make_na_block(
                        placement=mgr_locs,
                        fill_value=fill_value,
                        use_na_proxy=use_na_proxy,
                    )
                )
            else:
                blk = self.blocks[blkno]

                # Otherwise, slicing along items axis is necessary.
                if not blk._can_consolidate and not blk._validate_ndim:
                    # i.e. we dont go through here for DatetimeTZBlock
                    # A non-consolidatable block, it's easy, because there's
                    # only one item and each mgr loc is a copy of that single
                    # item.
                    deep = not (only_slice or using_copy_on_write())
                    for mgr_loc in mgr_locs:
                        newblk = blk.copy(deep=deep)
                        newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
                        blocks.append(newblk)

                else:
                    # GH#32779 to avoid the performance penalty of copying,
                    #  we may try to only slice
                    taker = blklocs[mgr_locs.indexer]
                    max_len = max(len(mgr_locs), taker.max() + 1)
                    if only_slice or using_copy_on_write():
                        taker = lib.maybe_indices_to_slice(taker, max_len)

                    if isinstance(taker, slice):
                        nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)
                        blocks.append(nb)
                    elif only_slice:
                        # GH#33597 slice instead of take, so we get
                        #  views instead of copies
                        for i, ml in zip(taker, mgr_locs):
                            slc = slice(i, i + 1)
                            bp = BlockPlacement(ml)
                            nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)
                            # We have np.shares_memory(nb.values, blk.values)
                            blocks.append(nb)
                    else:
                        nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
                        blocks.append(nb)

        return blocks

    def _make_na_block(
        self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False
    ) -> Block:
        # Note: we only get here with self.ndim == 2

        if use_na_proxy:
            assert fill_value is None
            # np.void placeholder marks "not yet materialized" columns
            shape = (len(placement), self.shape[1])
            vals = np.empty(shape, dtype=np.void)
            nb = NumpyBlock(vals, placement, ndim=2)
            return nb

        if fill_value is None:
            fill_value = np.nan

        shape = (len(placement), self.shape[1])

        dtype, fill_value = infer_dtype_from_scalar(fill_value)
        block_values = make_na_array(dtype, shape, fill_value)
        return new_block_2d(block_values, placement=placement)

    def take(
        self,
        indexer: npt.NDArray[np.intp],
        axis: AxisInt = 1,
        verify: bool = True,
    ) -> Self:
        """
        Take items along any axis.

        indexer : 
np.ndarray[np.intp]\n axis : int, default 1\n verify : bool, default True\n Check that all entries are between 0 and len(self) - 1, inclusive.\n Pass verify=False if this check has been done by the caller.\n\n Returns\n -------\n BlockManager\n """\n # Caller is responsible for ensuring indexer annotation is accurate\n\n n = self.shape[axis]\n indexer = maybe_convert_indices(indexer, n, verify=verify)\n\n new_labels = self.axes[axis].take(indexer)\n return self.reindex_indexer(\n new_axis=new_labels,\n indexer=indexer,\n axis=axis,\n allow_dups=True,\n copy=None,\n )\n\n\nclass BlockManager(libinternals.BlockManager, BaseBlockManager):\n """\n BaseBlockManager that holds 2D blocks.\n """\n\n ndim = 2\n\n # ----------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n blocks: Sequence[Block],\n axes: Sequence[Index],\n verify_integrity: bool = True,\n ) -> None:\n if verify_integrity:\n # Assertion disabled for performance\n # assert all(isinstance(x, Index) for x in axes)\n\n for block in blocks:\n if self.ndim != block.ndim:\n raise AssertionError(\n f"Number of Block dimensions ({block.ndim}) must equal "\n f"number of axes ({self.ndim})"\n )\n # As of 2.0, the caller is responsible for ensuring that\n # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2;\n # previously there was a special check for fastparquet compat.\n\n self._verify_integrity()\n\n def _verify_integrity(self) -> None:\n mgr_shape = self.shape\n tot_items = sum(len(x.mgr_locs) for x in self.blocks)\n for block in self.blocks:\n if block.shape[1:] != mgr_shape[1:]:\n raise_construction_error(tot_items, block.shape[1:], self.axes)\n if len(self.items) != tot_items:\n raise AssertionError(\n "Number of manager items must equal union of "\n f"block items\n# manager items: {len(self.items)}, # "\n f"tot_items: {tot_items}"\n )\n\n @classmethod\n def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:\n """\n Constructor for 
BlockManager and SingleBlockManager with same signature.\n """\n return cls(blocks, axes, verify_integrity=False)\n\n # ----------------------------------------------------------------\n # Indexing\n\n def fast_xs(self, loc: int) -> SingleBlockManager:\n """\n Return the array corresponding to `frame.iloc[loc]`.\n\n Parameters\n ----------\n loc : int\n\n Returns\n -------\n np.ndarray or ExtensionArray\n """\n if len(self.blocks) == 1:\n # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like;\n # is this ruled out in the general case?\n result = self.blocks[0].iget((slice(None), loc))\n # in the case of a single block, the new block is a view\n bp = BlockPlacement(slice(0, len(result)))\n block = new_block(\n result,\n placement=bp,\n ndim=1,\n refs=self.blocks[0].refs,\n )\n return SingleBlockManager(block, self.axes[0])\n\n dtype = interleaved_dtype([blk.dtype for blk in self.blocks])\n\n n = len(self)\n\n if isinstance(dtype, ExtensionDtype):\n # TODO: use object dtype as workaround for non-performant\n # EA.__setitem__ methods. 
(primarily ArrowExtensionArray.__setitem__\n # when iteratively setting individual values)\n # https://github.com/pandas-dev/pandas/pull/54508#issuecomment-1675827918\n result = np.empty(n, dtype=object)\n else:\n result = np.empty(n, dtype=dtype)\n result = ensure_wrapped_if_datetimelike(result)\n\n for blk in self.blocks:\n # Such assignment may incorrectly coerce NaT to None\n # result[blk.mgr_locs] = blk._slice((slice(None), loc))\n for i, rl in enumerate(blk.mgr_locs):\n result[rl] = blk.iget((i, loc))\n\n if isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n result = cls._from_sequence(result, dtype=dtype)\n\n bp = BlockPlacement(slice(0, len(result)))\n block = new_block(result, placement=bp, ndim=1)\n return SingleBlockManager(block, self.axes[0])\n\n def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager:\n """\n Return the data as a SingleBlockManager.\n """\n block = self.blocks[self.blknos[i]]\n values = block.iget(self.blklocs[i])\n\n # shortcut for select a single-dim from a 2-dim BM\n bp = BlockPlacement(slice(0, len(values)))\n nb = type(block)(\n values, placement=bp, ndim=1, refs=block.refs if track_ref else None\n )\n return SingleBlockManager(nb, self.axes[1])\n\n def iget_values(self, i: int) -> ArrayLike:\n """\n Return the data for column i as the values (ndarray or ExtensionArray).\n\n Warning! The returned array is a view but doesn't handle Copy-on-Write,\n so this should be used with caution.\n """\n # TODO(CoW) making the arrays read-only might make this safer to use?\n block = self.blocks[self.blknos[i]]\n values = block.iget(self.blklocs[i])\n return values\n\n @property\n def column_arrays(self) -> list[np.ndarray]:\n """\n Used in the JSON C code to access column arrays.\n This optimizes compared to using `iget_values` by converting each\n\n Warning! 
This doesn't handle Copy-on-Write, so should be used with\n caution (current use case of consuming this in the JSON code is fine).\n """\n # This is an optimized equivalent to\n # result = [self.iget_values(i) for i in range(len(self.items))]\n result: list[np.ndarray | None] = [None] * len(self.items)\n\n for blk in self.blocks:\n mgr_locs = blk._mgr_locs\n values = blk.array_values._values_for_json()\n if values.ndim == 1:\n # TODO(EA2D): special casing not needed with 2D EAs\n result[mgr_locs[0]] = values\n\n else:\n for i, loc in enumerate(mgr_locs):\n result[loc] = values[i]\n\n # error: Incompatible return value type (got "List[None]",\n # expected "List[ndarray[Any, Any]]")\n return result # type: ignore[return-value]\n\n def iset(\n self,\n loc: int | slice | np.ndarray,\n value: ArrayLike,\n inplace: bool = False,\n refs: BlockValuesRefs | None = None,\n ) -> None:\n """\n Set new item in-place. Does not consolidate. Adds new Block if not\n contained in the current set of items\n """\n\n # FIXME: refactor, clearly separate broadcasting & zip-like assignment\n # can prob also fix the various if tests for sparse/categorical\n if self._blklocs is None and self.ndim > 1:\n self._rebuild_blknos_and_blklocs()\n\n # Note: we exclude DTA/TDA here\n value_is_extension_type = is_1d_only_ea_dtype(value.dtype)\n if not value_is_extension_type:\n if value.ndim == 2:\n value = value.T\n else:\n value = ensure_block_shape(value, ndim=2)\n\n if value.shape[1:] != self.shape[1:]:\n raise AssertionError(\n "Shape of new values must be compatible with manager shape"\n )\n\n if lib.is_integer(loc):\n # We have 6 tests where loc is _not_ an int.\n # In this case, get_blkno_placements will yield only one tuple,\n # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))\n\n # Check if we can use _iset_single fastpath\n loc = cast(int, loc)\n blkno = self.blknos[loc]\n blk = self.blocks[blkno]\n if len(blk._mgr_locs) == 1: # TODO: fastest way to check this?\n return 
self._iset_single(\n loc,\n value,\n inplace=inplace,\n blkno=blkno,\n blk=blk,\n refs=refs,\n )\n\n # error: Incompatible types in assignment (expression has type\n # "List[Union[int, slice, ndarray]]", variable has type "Union[int,\n # slice, ndarray]")\n loc = [loc] # type: ignore[assignment]\n\n # categorical/sparse/datetimetz\n if value_is_extension_type:\n\n def value_getitem(placement):\n return value\n\n else:\n\n def value_getitem(placement):\n return value[placement.indexer]\n\n # Accessing public blknos ensures the public versions are initialized\n blknos = self.blknos[loc]\n blklocs = self.blklocs[loc].copy()\n\n unfit_mgr_locs = []\n unfit_val_locs = []\n removed_blknos = []\n for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True):\n blk = self.blocks[blkno_l]\n blk_locs = blklocs[val_locs.indexer]\n if inplace and blk.should_store(value):\n # Updating inplace -> check if we need to do Copy-on-Write\n if using_copy_on_write() and not self._has_no_reference_block(blkno_l):\n self._iset_split_block(\n blkno_l, blk_locs, value_getitem(val_locs), refs=refs\n )\n else:\n blk.set_inplace(blk_locs, value_getitem(val_locs))\n continue\n else:\n unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])\n unfit_val_locs.append(val_locs)\n\n # If all block items are unfit, schedule the block for removal.\n if len(val_locs) == len(blk.mgr_locs):\n removed_blknos.append(blkno_l)\n continue\n else:\n # Defer setting the new values to enable consolidation\n self._iset_split_block(blkno_l, blk_locs, refs=refs)\n\n if len(removed_blknos):\n # Remove blocks & update blknos accordingly\n is_deleted = np.zeros(self.nblocks, dtype=np.bool_)\n is_deleted[removed_blknos] = True\n\n new_blknos = np.empty(self.nblocks, dtype=np.intp)\n new_blknos.fill(-1)\n new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))\n self._blknos = new_blknos[self._blknos]\n self.blocks = tuple(\n blk for i, blk in enumerate(self.blocks) if i not in 
set(removed_blknos)
            )

        # Columns that could not be stored in-place get brand-new blocks
        # appended at the end of self.blocks.
        if unfit_val_locs:
            unfit_idxr = np.concatenate(unfit_mgr_locs)
            unfit_count = len(unfit_idxr)

            new_blocks: list[Block] = []
            if value_is_extension_type:
                # This code (ab-)uses the fact that EA blocks contain only
                # one item.
                # TODO(EA2D): special casing unnecessary with 2D EAs
                new_blocks.extend(
                    new_block_2d(
                        values=value,
                        placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)),
                        refs=refs,
                    )
                    for mgr_loc in unfit_idxr
                )

                # one new single-column block per unfit location
                self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks)
                self._blklocs[unfit_idxr] = 0

            else:
                # unfit_val_locs contains BlockPlacement objects
                unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])

                new_blocks.append(
                    new_block_2d(
                        values=value_getitem(unfit_val_items),
                        placement=BlockPlacement(unfit_idxr),
                        refs=refs,
                    )
                )

                # a single new multi-column block holds all unfit locations
                self._blknos[unfit_idxr] = len(self.blocks)
                self._blklocs[unfit_idxr] = np.arange(unfit_count)

            self.blocks += tuple(new_blocks)

            # Newly created block's dtype may already be present.
            self._known_consolidated = False

    def _iset_split_block(
        self,
        blkno_l: int,
        blk_locs: np.ndarray | list[int],
        value: ArrayLike | None = None,
        refs: BlockValuesRefs | None = None,
    ) -> None:
        """Removes columns from a block by splitting the block.

        Avoids copying the whole block through slicing and updates the manager
        after determining the new block structure. Optionally adds a new block,
        otherwise has to be done by the caller.

        Parameters
        ----------
        blkno_l: The block number to operate on, relevant for updating the manager
        blk_locs: The locations of our block that should be deleted.
        value: The value to set as a replacement.
        refs: The reference tracking object of the value to set.
        """
        blk = self.blocks[blkno_l]

        if self._blklocs is None:
            self._rebuild_blknos_and_blklocs()

        nbs_tup = tuple(blk.delete(blk_locs))
        if value is not None:
            # the replacement value becomes the first of the split blocks
            locs = blk.mgr_locs.as_array[blk_locs]
            first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs)
        else:
            first_nb = nbs_tup[0]
            nbs_tup = tuple(nbs_tup[1:])

        nr_blocks = len(self.blocks)
        # splice the split blocks in place of the original block
        blocks_tup = (
            self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup
        )
        self.blocks = blocks_tup

        if not nbs_tup and value is not None:
            # No need to update anything if split did not happen
            return

        self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))

        for i, nb in enumerate(nbs_tup):
            self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
            self._blknos[nb.mgr_locs.indexer] = i + nr_blocks

    def _iset_single(
        self,
        loc: int,
        value: ArrayLike,
        inplace: bool,
        blkno: int,
        blk: Block,
        refs: BlockValuesRefs | None = None,
    ) -> None:
        """
        Fastpath for iset when we are only setting a single position and
        the Block currently in that position is itself single-column.

        In this case we can swap out the entire Block and blklocs and blknos
        are unaffected.
        """
        # Caller is responsible for verifying value.shape

        if inplace and blk.should_store(value):
            copy = False
            if using_copy_on_write() and not self._has_no_reference_block(blkno):
                # perform Copy-on-Write and clear the reference
                copy = True
            iloc = self.blklocs[loc]
            blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)
            return

        # Otherwise replace the existing single-column block wholesale.
        nb = new_block_2d(value, placement=blk._mgr_locs, refs=refs)
        old_blocks = self.blocks
        
new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :]\n self.blocks = new_blocks\n return\n\n def column_setitem(\n self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False\n ) -> None:\n """\n Set values ("setitem") into a single column (not setting the full column).\n\n This is a method on the BlockManager level, to avoid creating an\n intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)\n """\n needs_to_warn = False\n if warn_copy_on_write() and not self._has_no_reference(loc):\n if not isinstance(\n self.blocks[self.blknos[loc]].values,\n (ArrowExtensionArray, ArrowStringArray),\n ):\n # We might raise if we are in an expansion case, so defer\n # warning till we actually updated\n needs_to_warn = True\n\n elif using_copy_on_write() and not self._has_no_reference(loc):\n blkno = self.blknos[loc]\n # Split blocks to only copy the column we want to modify\n blk_loc = self.blklocs[loc]\n # Copy our values\n values = self.blocks[blkno].values\n if values.ndim == 1:\n values = values.copy()\n else:\n # Use [blk_loc] as indexer to keep ndim=2, this already results in a\n # copy\n values = values[[blk_loc]]\n self._iset_split_block(blkno, [blk_loc], values)\n\n # this manager is only created temporarily to mutate the values in place\n # so don't track references, otherwise the `setitem` would perform CoW again\n col_mgr = self.iget(loc, track_ref=False)\n if inplace_only:\n col_mgr.setitem_inplace(idx, value)\n else:\n new_mgr = col_mgr.setitem((idx,), value)\n self.iset(loc, new_mgr._block.values, inplace=True)\n\n if needs_to_warn:\n warnings.warn(\n COW_WARNING_GENERAL_MSG,\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None:\n """\n Insert item at selected position.\n\n Parameters\n ----------\n loc : int\n item : hashable\n value : np.ndarray or ExtensionArray\n refs : The reference tracking object of the value to set.\n 
"""\n with warnings.catch_warnings():\n # TODO: re-issue this with setitem-specific message?\n warnings.filterwarnings(\n "ignore",\n "The behavior of Index.insert with object-dtype is deprecated",\n category=FutureWarning,\n )\n new_axis = self.items.insert(loc, item)\n\n if value.ndim == 2:\n value = value.T\n if len(value) > 1:\n raise ValueError(\n f"Expected a 1D array, got an array with shape {value.T.shape}"\n )\n else:\n value = ensure_block_shape(value, ndim=self.ndim)\n\n bp = BlockPlacement(slice(loc, loc + 1))\n block = new_block_2d(values=value, placement=bp, refs=refs)\n\n if not len(self.blocks):\n # Fastpath\n self._blklocs = np.array([0], dtype=np.intp)\n self._blknos = np.array([0], dtype=np.intp)\n else:\n self._insert_update_mgr_locs(loc)\n self._insert_update_blklocs_and_blknos(loc)\n\n self.axes[0] = new_axis\n self.blocks += (block,)\n\n self._known_consolidated = False\n\n if sum(not block.is_extension for block in self.blocks) > 100:\n warnings.warn(\n "DataFrame is highly fragmented. This is usually the result "\n "of calling `frame.insert` many times, which has poor performance. "\n "Consider joining all columns at once using pd.concat(axis=1) "\n "instead. 
To get a de-fragmented frame, use `newframe = frame.copy()`",
                PerformanceWarning,
                stacklevel=find_stack_level(),
            )

    def _insert_update_mgr_locs(self, loc) -> None:
        """
        When inserting a new Block at location 'loc', we increment
        all of the mgr_locs of blocks above that by one.
        """
        # `count` from _fast_count_smallints is unused; only the block ids
        # at/above the insertion point matter here.
        for blkno, count in _fast_count_smallints(self.blknos[loc:]):
            # .620 this way, .326 of which is in increment_above
            blk = self.blocks[blkno]
            blk._mgr_locs = blk._mgr_locs.increment_above(loc)

    def _insert_update_blklocs_and_blknos(self, loc) -> None:
        """
        When inserting a new Block at location 'loc', we update our
        _blklocs and _blknos.
        """

        # Accessing public blklocs ensures the public versions are initialized
        if loc == self.blklocs.shape[0]:
            # np.append is a lot faster, let's use it if we can.
            self._blklocs = np.append(self._blklocs, 0)
            self._blknos = np.append(self._blknos, len(self.blocks))
        elif loc == 0:
            # np.append is a lot faster, let's use it if we can.
            # reverse, append at the (reversed) end, then reverse back
            self._blklocs = np.append(self._blklocs[::-1], 0)[::-1]
            self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1]
        else:
            # general case: insert in the middle via the cython helper
            new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos(
                self.blklocs, self.blknos, loc, len(self.blocks)
            )
            self._blklocs = new_blklocs
            self._blknos = new_blknos

    def idelete(self, indexer) -> BlockManager:
        """
        Delete selected locations, returning a new BlockManager.
        """
        is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
        is_deleted[indexer] = True
        # positions to keep
        taker = (~is_deleted).nonzero()[0]

        # only_slice=True -> the new blocks are views on the existing arrays
        nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True)
        new_columns = self.items[~is_deleted]
        axes = [new_columns, self.axes[1]]
        return type(self)(tuple(nbs), axes, verify_integrity=False)

    # ----------------------------------------------------------------
    # Block-wise Operation

    def grouped_reduce(self, func: Callable) -> Self:
        """
        Apply grouped reduction function blockwise, returning a new 
BlockManager.

        Parameters
        ----------
        func : grouped reduction function

        Returns
        -------
        BlockManager
        """
        result_blocks: list[Block] = []

        for blk in self.blocks:
            if blk.is_object:
                # split on object-dtype blocks bc some columns may raise
                # while others do not.
                for sb in blk._split():
                    applied = sb.apply(func)
                    result_blocks = extend_blocks(applied, result_blocks)
            else:
                applied = blk.apply(func)
                result_blocks = extend_blocks(applied, result_blocks)

        if len(result_blocks) == 0:
            nrows = 0
        else:
            # infer the resulting row count from the first result block
            nrows = result_blocks[0].values.shape[-1]
        index = Index(range(nrows))

        return type(self).from_blocks(result_blocks, [self.axes[0], index])

    def reduce(self, func: Callable) -> Self:
        """
        Apply reduction function blockwise, returning a single-row BlockManager.

        Parameters
        ----------
        func : reduction function

        Returns
        -------
        BlockManager
        """
        # If 2D, we assume that we're operating column-wise
        assert self.ndim == 2

        res_blocks: list[Block] = []
        for blk in self.blocks:
            nbs = blk.reduce(func)
            res_blocks.extend(nbs)

        index = Index([None])  # placeholder
        new_mgr = type(self).from_blocks(res_blocks, [self.items, index])
        return new_mgr

    def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager:
        """
        Apply array_op blockwise with another (aligned) BlockManager.
        """
        return operate_blockwise(self, other, array_op)

    def _equal_values(self: BlockManager, other: BlockManager) -> bool:
        """
        Used in .equals defined in base class. 
Only check the column values
        assuming shape and indexes have already been checked.
        """
        return blockwise_all(self, other, array_equals)

    def quantile(
        self,
        *,
        qs: Index,  # with dtype float64
        interpolation: QuantileInterpolation = "linear",
    ) -> Self:
        """
        Iterate over blocks applying quantile reduction.
        This routine is intended for reduction type operations and
        will do inference on the generated blocks.

        Parameters
        ----------
        interpolation : type of interpolation, default 'linear'
        qs : list of the quantiles to be computed

        Returns
        -------
        BlockManager
        """
        # Series dispatches to DataFrame for quantile, which allows us to
        # simplify some of the code here and in the blocks
        assert self.ndim >= 2
        assert is_list_like(qs)  # caller is responsible for this

        new_axes = list(self.axes)
        new_axes[1] = Index(qs, dtype=np.float64)

        blocks = [
            blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks
        ]

        return type(self)(blocks, new_axes)

    # ----------------------------------------------------------------

    def unstack(self, unstacker, fill_value) -> BlockManager:
        """
        Return a BlockManager with all blocks unstacked.

        Parameters
        ----------
        unstacker : reshape._Unstacker
        fill_value : Any
            fill_value for newly introduced missing values.

        Returns
        -------
        unstacked : BlockManager
        """
        new_columns = unstacker.get_new_columns(self.items)
        new_index = unstacker.new_index

        allow_fill = not unstacker.mask_all
        if allow_fill:
            # calculating the full mask once and passing it to Block._unstack is
            # faster than calculating it in each repeated call
            new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
            needs_masking = new_mask2D.any(axis=0)
        else:
            needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool)

        new_blocks: list[Block] = []
        columns_mask: list[np.ndarray] = []

        if len(self.items) == 0:
            factor = 1
        else:
            fac = len(new_columns) / 
len(self.items)
            # the number of new columns must be an exact multiple of the
            # original column count
            assert fac == int(fac)
            factor = int(fac)

        for blk in self.blocks:
            mgr_locs = blk.mgr_locs
            new_placement = mgr_locs.tile_for_unstack(factor)

            blocks, mask = blk._unstack(
                unstacker,
                fill_value,
                new_placement=new_placement,
                needs_masking=needs_masking,
            )

            new_blocks.extend(blocks)
            columns_mask.extend(mask)

            # Block._unstack should ensure this holds,
            assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks)
            # In turn this ensures that in the BlockManager call below
            # we have len(new_columns) == sum(x.shape[0] for x in new_blocks)
            # which suffices to allow us to pass verify_integrity=False

        new_columns = new_columns[columns_mask]

        bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
        return bm

    def to_dict(self) -> dict[str, Self]:
        """
        Return a dict of str(dtype) -> BlockManager

        Returns
        -------
        values : a dict of dtype -> BlockManager
        """

        # group the blocks by their dtype's string representation
        bd: dict[str, list[Block]] = {}
        for b in self.blocks:
            bd.setdefault(str(b.dtype), []).append(b)

        # TODO(EA2D): the combine will be unnecessary with 2D EAs
        return {dtype: self._combine(blocks) for dtype, blocks in bd.items()}

    def as_array(
        self,
        dtype: np.dtype | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Convert the blockmanager data into a numpy array.

        Parameters
        ----------
        dtype : np.dtype or None, default None
            Data type of the return array.
        copy : bool, default False
            If True then guarantee that a copy is returned. 
A value of\n False does not guarantee that the underlying data is not\n copied.\n na_value : object, default lib.no_default\n Value to be used as the missing value sentinel.\n\n Returns\n -------\n arr : ndarray\n """\n passed_nan = lib.is_float(na_value) and isna(na_value)\n\n if len(self.blocks) == 0:\n arr = np.empty(self.shape, dtype=float)\n return arr.transpose()\n\n if self.is_single_block:\n blk = self.blocks[0]\n\n if na_value is not lib.no_default:\n # We want to copy when na_value is provided to avoid\n # mutating the original object\n if lib.is_np_dtype(blk.dtype, "f") and passed_nan:\n # We are already numpy-float and na_value=np.nan\n pass\n else:\n copy = True\n\n if blk.is_extension:\n # Avoid implicit conversion of extension blocks to object\n\n # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no\n # attribute "to_numpy"\n arr = blk.values.to_numpy( # type: ignore[union-attr]\n dtype=dtype,\n na_value=na_value,\n copy=copy,\n ).reshape(blk.shape)\n elif not copy:\n arr = np.asarray(blk.values, dtype=dtype)\n else:\n arr = np.array(blk.values, dtype=dtype, copy=copy)\n\n if using_copy_on_write() and not copy:\n arr = arr.view()\n arr.flags.writeable = False\n else:\n arr = self._interleave(dtype=dtype, na_value=na_value)\n # The underlying data was copied within _interleave, so no need\n # to further copy if copy=True or setting na_value\n\n if na_value is lib.no_default:\n pass\n elif arr.dtype.kind == "f" and passed_nan:\n pass\n else:\n arr[isna(arr)] = na_value\n\n return arr.transpose()\n\n def _interleave(\n self,\n dtype: np.dtype | None = None,\n na_value: object = lib.no_default,\n ) -> np.ndarray:\n """\n Return ndarray from blocks with specified item order\n Items must be contained in the blocks\n """\n if not dtype:\n # Incompatible types in assignment (expression has type\n # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has\n # type "Optional[dtype[Any]]")\n dtype = interleaved_dtype( # type: 
ignore[assignment]
                [blk.dtype for blk in self.blocks]
            )

        # error: Argument 1 to "ensure_np_dtype" has incompatible type
        # "Optional[dtype[Any]]"; expected "Union[dtype[Any], ExtensionDtype]"
        dtype = ensure_np_dtype(dtype)  # type: ignore[arg-type]
        result = np.empty(self.shape, dtype=dtype)

        # tracks which rows of `result` have been filled by some block
        itemmask = np.zeros(self.shape[0])

        if dtype == np.dtype("object") and na_value is lib.no_default:
            # much more performant than using to_numpy below
            for blk in self.blocks:
                rl = blk.mgr_locs
                arr = blk.get_values(dtype)
                result[rl.indexer] = arr
                itemmask[rl.indexer] = 1
            return result

        for blk in self.blocks:
            rl = blk.mgr_locs
            if blk.is_extension:
                # Avoid implicit conversion of extension blocks to object

                # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
                # attribute "to_numpy"
                arr = blk.values.to_numpy(  # type: ignore[union-attr]
                    dtype=dtype,
                    na_value=na_value,
                )
            else:
                arr = blk.get_values(dtype)
            result[rl.indexer] = arr
            itemmask[rl.indexer] = 1

        if not itemmask.all():
            raise AssertionError("Some items were not contained in blocks")

        return result

    # ----------------------------------------------------------------
    # Consolidation

    def is_consolidated(self) -> bool:
        """
        Return True if no two consolidatable blocks share the same dtype.

        The result is cached; _consolidate_check recomputes it when stale.
        """
        if not self._known_consolidated:
            self._consolidate_check()
        return self._is_consolidated

    def _consolidate_check(self) -> None:
        """Recompute and cache whether this manager is consolidated."""
        if len(self.blocks) == 1:
            # fastpath
            self._is_consolidated = True
            self._known_consolidated = True
            return
        # consolidated <=> every consolidatable block has a unique dtype
        dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
        self._is_consolidated = len(dtypes) == len(set(dtypes))
        self._known_consolidated = True

    def _consolidate_inplace(self) -> None:
        # In general, _consolidate_inplace should only be called via
        # DataFrame._consolidate_inplace, otherwise we will fail to invalidate
        # the DataFrame's _item_cache. 
The exception is for newly-created\n # BlockManager objects not yet attached to a DataFrame.\n if not self.is_consolidated():\n self.blocks = _consolidate(self.blocks)\n self._is_consolidated = True\n self._known_consolidated = True\n self._rebuild_blknos_and_blklocs()\n\n # ----------------------------------------------------------------\n # Concatenation\n\n @classmethod\n def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:\n """\n Concatenate uniformly-indexed BlockManagers horizontally.\n """\n offset = 0\n blocks: list[Block] = []\n for mgr in mgrs:\n for blk in mgr.blocks:\n # We need to do getitem_block here otherwise we would be altering\n # blk.mgr_locs in place, which would render it invalid. This is only\n # relevant in the copy=False case.\n nb = blk.slice_block_columns(slice(None))\n nb._mgr_locs = nb._mgr_locs.add(offset)\n blocks.append(nb)\n\n offset += len(mgr.items)\n\n new_mgr = cls(tuple(blocks), axes)\n return new_mgr\n\n @classmethod\n def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:\n """\n Concatenate uniformly-indexed BlockManagers vertically.\n """\n raise NotImplementedError("This logic lives (for now) in internals.concat")\n\n\nclass SingleBlockManager(BaseBlockManager, SingleDataManager):\n """manage a single block with"""\n\n @property\n def ndim(self) -> Literal[1]:\n return 1\n\n _is_consolidated = True\n _known_consolidated = True\n __slots__ = ()\n is_single_block = True\n\n def __init__(\n self,\n block: Block,\n axis: Index,\n verify_integrity: bool = False,\n ) -> None:\n # Assertions disabled for performance\n # assert isinstance(block, Block), type(block)\n # assert isinstance(axis, Index), type(axis)\n\n self.axes = [axis]\n self.blocks = (block,)\n\n @classmethod\n def from_blocks(\n cls,\n blocks: list[Block],\n axes: list[Index],\n ) -> Self:\n """\n Constructor for BlockManager and SingleBlockManager with same signature.\n """\n assert len(blocks) == 1\n assert len(axes) == 1\n 
return cls(blocks[0], axes[0], verify_integrity=False)\n\n @classmethod\n def from_array(\n cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None = None\n ) -> SingleBlockManager:\n """\n Constructor for if we have an array that is not yet a Block.\n """\n array = maybe_coerce_values(array)\n bp = BlockPlacement(slice(0, len(index)))\n block = new_block(array, placement=bp, ndim=1, refs=refs)\n return cls(block, index)\n\n def to_2d_mgr(self, columns: Index) -> BlockManager:\n """\n Manager analogue of Series.to_frame\n """\n blk = self.blocks[0]\n arr = ensure_block_shape(blk.values, ndim=2)\n bp = BlockPlacement(0)\n new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs)\n axes = [columns, self.axes[0]]\n return BlockManager([new_blk], axes=axes, verify_integrity=False)\n\n def _has_no_reference(self, i: int = 0) -> bool:\n """\n Check for column `i` if it has references.\n (whether it references another array or is itself being referenced)\n Returns True if the column has no references.\n """\n return not self.blocks[0].refs.has_reference()\n\n def __getstate__(self):\n block_values = [b.values for b in self.blocks]\n block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]\n axes_array = list(self.axes)\n\n extra_state = {\n "0.14.1": {\n "axes": axes_array,\n "blocks": [\n {"values": b.values, "mgr_locs": b.mgr_locs.indexer}\n for b in self.blocks\n ],\n }\n }\n\n # First three elements of the state are to maintain forward\n # compatibility with 0.13.1.\n return axes_array, block_values, block_items, extra_state\n\n def __setstate__(self, state) -> None:\n def unpickle_block(values, mgr_locs, ndim: int) -> Block:\n # TODO(EA2D): ndim would be unnecessary with 2D EAs\n # older pickles may store e.g. 
DatetimeIndex instead of DatetimeArray\n values = extract_array(values, extract_numpy=True)\n if not isinstance(mgr_locs, BlockPlacement):\n mgr_locs = BlockPlacement(mgr_locs)\n\n values = maybe_coerce_values(values)\n return new_block(values, placement=mgr_locs, ndim=ndim)\n\n if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:\n state = state[3]["0.14.1"]\n self.axes = [ensure_index(ax) for ax in state["axes"]]\n ndim = len(self.axes)\n self.blocks = tuple(\n unpickle_block(b["values"], b["mgr_locs"], ndim=ndim)\n for b in state["blocks"]\n )\n else:\n raise NotImplementedError("pre-0.14.1 pickles are no longer supported")\n\n self._post_setstate()\n\n def _post_setstate(self) -> None:\n pass\n\n @cache_readonly\n def _block(self) -> Block:\n return self.blocks[0]\n\n @property\n def _blknos(self):\n """compat with BlockManager"""\n return None\n\n @property\n def _blklocs(self):\n """compat with BlockManager"""\n return None\n\n def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Self:\n # similar to get_slice, but not restricted to slice indexer\n blk = self._block\n if using_copy_on_write() and len(indexer) > 0 and indexer.all():\n return type(self)(blk.copy(deep=False), self.index)\n array = blk.values[indexer]\n\n if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "b":\n # boolean indexing always gives a copy with numpy\n refs = None\n else:\n # TODO(CoW) in theory only need to track reference if new_array is a view\n refs = blk.refs\n\n bp = BlockPlacement(slice(0, len(array)))\n block = type(blk)(array, placement=bp, ndim=1, refs=refs)\n\n new_idx = self.index[indexer]\n return type(self)(block, new_idx)\n\n def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager:\n # Assertion disabled for performance\n # assert isinstance(slobj, slice), type(slobj)\n if axis >= self.ndim:\n raise IndexError("Requested axis not found in manager")\n\n blk = self._block\n array = blk.values[slobj]\n bp = 
BlockPlacement(slice(0, len(array)))\n # TODO this method is only used in groupby SeriesSplitter at the moment,\n # so passing refs is not yet covered by the tests\n block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs)\n new_index = self.index._getitem_slice(slobj)\n return type(self)(block, new_index)\n\n @property\n def index(self) -> Index:\n return self.axes[0]\n\n @property\n def dtype(self) -> DtypeObj:\n return self._block.dtype\n\n def get_dtypes(self) -> npt.NDArray[np.object_]:\n return np.array([self._block.dtype], dtype=object)\n\n def external_values(self):\n """The array that Series.values returns"""\n return self._block.external_values()\n\n def internal_values(self):\n """The array that Series._values returns"""\n return self._block.values\n\n def array_values(self) -> ExtensionArray:\n """The array that Series.array returns"""\n return self._block.array_values\n\n def get_numeric_data(self) -> Self:\n if self._block.is_numeric:\n return self.copy(deep=False)\n return self.make_empty()\n\n @property\n def _can_hold_na(self) -> bool:\n return self._block._can_hold_na\n\n def setitem_inplace(self, indexer, value, warn: bool = True) -> None:\n """\n Set values with indexer.\n\n For Single[Block/Array]Manager, this backs s[indexer] = value\n\n This is an inplace version of `setitem()`, mutating the manager/values\n in place, not returning a new Manager (and Block), and thus never changing\n the dtype.\n """\n using_cow = using_copy_on_write()\n warn_cow = warn_copy_on_write()\n if (using_cow or warn_cow) and not self._has_no_reference(0):\n if using_cow:\n self.blocks = (self._block.copy(),)\n self._cache.clear()\n elif warn_cow and warn:\n warnings.warn(\n COW_WARNING_SETITEM_MSG,\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n super().setitem_inplace(indexer, value)\n\n def idelete(self, indexer) -> SingleBlockManager:\n """\n Delete single location from SingleBlockManager.\n\n Ensures that self.blocks doesn't become empty.\n """\n 
nb = self._block.delete(indexer)[0]\n self.blocks = (nb,)\n self.axes[0] = self.axes[0].delete(indexer)\n self._cache.clear()\n return self\n\n def fast_xs(self, loc):\n """\n fast path for getting a cross-section\n return a view of the data\n """\n raise NotImplementedError("Use series._values[loc] instead")\n\n def set_values(self, values: ArrayLike) -> None:\n """\n Set the values of the single block in place.\n\n Use at your own risk! This does not check if the passed values are\n valid for the current Block/SingleBlockManager (length, dtype, etc),\n and this does not properly keep track of references.\n """\n # NOTE(CoW) Currently this is only used for FrameColumnApply.series_generator\n # which handles CoW by setting the refs manually if necessary\n self.blocks[0].values = values\n self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))\n\n def _equal_values(self, other: Self) -> bool:\n """\n Used in .equals defined in base class. Only check the column values\n assuming shape and indexes have already been checked.\n """\n # For SingleBlockManager (i.e.Series)\n if other.ndim != 1:\n return False\n left = self.blocks[0].values\n right = other.blocks[0].values\n return array_equals(left, right)\n\n\n# --------------------------------------------------------------------\n# Constructor Helpers\n\n\ndef create_block_manager_from_blocks(\n blocks: list[Block],\n axes: list[Index],\n consolidate: bool = True,\n verify_integrity: bool = True,\n) -> BlockManager:\n # If verify_integrity=False, then caller is responsible for checking\n # all(x.shape[-1] == len(axes[1]) for x in blocks)\n # sum(x.shape[0] for x in blocks) == len(axes[0])\n # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0])))\n # all(blk.ndim == 2 for blk in blocks)\n # This allows us to safely pass verify_integrity=False\n\n try:\n mgr = BlockManager(blocks, axes, verify_integrity=verify_integrity)\n\n except ValueError as err:\n arrays = [blk.values for blk in blocks]\n 
tot_items = sum(arr.shape[0] for arr in arrays)\n raise_construction_error(tot_items, arrays[0].shape[1:], axes, err)\n\n if consolidate:\n mgr._consolidate_inplace()\n return mgr\n\n\ndef create_block_manager_from_column_arrays(\n arrays: list[ArrayLike],\n axes: list[Index],\n consolidate: bool,\n refs: list,\n) -> BlockManager:\n # Assertions disabled for performance (caller is responsible for verifying)\n # assert isinstance(axes, list)\n # assert all(isinstance(x, Index) for x in axes)\n # assert all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)\n # assert all(type(x) is not NumpyExtensionArray for x in arrays)\n # assert all(x.ndim == 1 for x in arrays)\n # assert all(len(x) == len(axes[1]) for x in arrays)\n # assert len(arrays) == len(axes[0])\n # These last three are sufficient to allow us to safely pass\n # verify_integrity=False below.\n\n try:\n blocks = _form_blocks(arrays, consolidate, refs)\n mgr = BlockManager(blocks, axes, verify_integrity=False)\n except ValueError as e:\n raise_construction_error(len(arrays), arrays[0].shape, axes, e)\n if consolidate:\n mgr._consolidate_inplace()\n return mgr\n\n\ndef raise_construction_error(\n tot_items: int,\n block_shape: Shape,\n axes: list[Index],\n e: ValueError | None = None,\n):\n """raise a helpful message about our construction"""\n passed = tuple(map(int, [tot_items] + list(block_shape)))\n # Correcting the user facing error message during dataframe construction\n if len(passed) <= 2:\n passed = passed[::-1]\n\n implied = tuple(len(ax) for ax in axes)\n # Correcting the user facing error message during dataframe construction\n if len(implied) <= 2:\n implied = implied[::-1]\n\n # We return the exception object instead of raising it so that we\n # can raise it in the caller; mypy plays better with that\n if passed == implied and e is not None:\n raise e\n if block_shape[0] == 0:\n raise ValueError("Empty data passed with indices specified.")\n raise ValueError(f"Shape of passed values 
is {passed}, indices imply {implied}")\n\n\n# -----------------------------------------------------------------------\n\n\ndef _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, DtypeObj]:\n dtype = tup[1].dtype\n\n if is_1d_only_ea_dtype(dtype):\n # We know these won't be consolidated, so don't need to group these.\n # This avoids expensive comparisons of CategoricalDtype objects\n sep = id(dtype)\n else:\n sep = 0\n\n return sep, dtype\n\n\ndef _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]:\n tuples = list(enumerate(arrays))\n\n if not consolidate:\n return _tuples_to_blocks_no_consolidate(tuples, refs)\n\n # when consolidating, we can ignore refs (either stacking always copies,\n # or the EA is already copied in the calling dict_to_mgr)\n\n # group by dtype\n grouper = itertools.groupby(tuples, _grouping_func)\n\n nbs: list[Block] = []\n for (_, dtype), tup_block in grouper:\n block_type = get_block_type(dtype)\n\n if isinstance(dtype, np.dtype):\n is_dtlike = dtype.kind in "mM"\n\n if issubclass(dtype.type, (str, bytes)):\n dtype = np.dtype(object)\n\n values, placement = _stack_arrays(list(tup_block), dtype)\n if is_dtlike:\n values = ensure_wrapped_if_datetimelike(values)\n blk = block_type(values, placement=BlockPlacement(placement), ndim=2)\n nbs.append(blk)\n\n elif is_1d_only_ea_dtype(dtype):\n dtype_blocks = [\n block_type(x[1], placement=BlockPlacement(x[0]), ndim=2)\n for x in tup_block\n ]\n nbs.extend(dtype_blocks)\n\n else:\n dtype_blocks = [\n block_type(\n ensure_block_shape(x[1], 2), placement=BlockPlacement(x[0]), ndim=2\n )\n for x in tup_block\n ]\n nbs.extend(dtype_blocks)\n return nbs\n\n\ndef _tuples_to_blocks_no_consolidate(tuples, refs) -> list[Block]:\n # tuples produced within _form_blocks are of the form (placement, array)\n return [\n new_block_2d(\n ensure_block_shape(arr, ndim=2), placement=BlockPlacement(i), refs=ref\n )\n for ((i, arr), ref) in zip(tuples, refs)\n ]\n\n\ndef 
_stack_arrays(tuples, dtype: np.dtype):\n placement, arrays = zip(*tuples)\n\n first = arrays[0]\n shape = (len(arrays),) + first.shape\n\n stacked = np.empty(shape, dtype=dtype)\n for i, arr in enumerate(arrays):\n stacked[i] = arr\n\n return stacked, placement\n\n\ndef _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]:\n """\n Merge blocks having same dtype, exclude non-consolidating blocks\n """\n # sort by _can_consolidate, dtype\n gkey = lambda x: x._consolidate_key\n grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)\n\n new_blocks: list[Block] = []\n for (_can_consolidate, dtype), group_blocks in grouper:\n merged_blocks, _ = _merge_blocks(\n list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate\n )\n new_blocks = extend_blocks(merged_blocks, new_blocks)\n return tuple(new_blocks)\n\n\ndef _merge_blocks(\n blocks: list[Block], dtype: DtypeObj, can_consolidate: bool\n) -> tuple[list[Block], bool]:\n if len(blocks) == 1:\n return blocks, False\n\n if can_consolidate:\n # TODO: optimization potential in case all mgrs contain slices and\n # combination of those slices is a slice, too.\n new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])\n\n new_values: ArrayLike\n\n if isinstance(blocks[0].dtype, np.dtype):\n # error: List comprehension has incompatible type List[Union[ndarray,\n # ExtensionArray]]; expected List[Union[complex, generic,\n # Sequence[Union[int, float, complex, str, bytes, generic]],\n # Sequence[Sequence[Any]], SupportsArray]]\n new_values = np.vstack([b.values for b in blocks]) # type: ignore[misc]\n else:\n bvals = [blk.values for blk in blocks]\n bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals)\n new_values = bvals2[0]._concat_same_type(bvals2, axis=0)\n\n argsort = np.argsort(new_mgr_locs)\n new_values = new_values[argsort]\n new_mgr_locs = new_mgr_locs[argsort]\n\n bp = BlockPlacement(new_mgr_locs)\n return [new_block_2d(new_values, placement=bp)], True\n\n # can't consolidate --> 
no merge\n return blocks, False\n\n\ndef _fast_count_smallints(arr: npt.NDArray[np.intp]):\n """Faster version of set(arr) for sequences of small numbers."""\n counts = np.bincount(arr)\n nz = counts.nonzero()[0]\n # Note: list(zip(...) outperforms list(np.c_[nz, counts[nz]]) here,\n # in one benchmark by a factor of 11\n return zip(nz, counts[nz])\n\n\ndef _preprocess_slice_or_indexer(\n slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool\n):\n if isinstance(slice_or_indexer, slice):\n return (\n "slice",\n slice_or_indexer,\n libinternals.slice_len(slice_or_indexer, length),\n )\n else:\n if (\n not isinstance(slice_or_indexer, np.ndarray)\n or slice_or_indexer.dtype.kind != "i"\n ):\n dtype = getattr(slice_or_indexer, "dtype", None)\n raise TypeError(type(slice_or_indexer), dtype)\n\n indexer = ensure_platform_int(slice_or_indexer)\n if not allow_fill:\n indexer = maybe_convert_indices(indexer, length)\n return "fancy", indexer, len(indexer)\n\n\ndef make_na_array(dtype: DtypeObj, shape: Shape, fill_value) -> ArrayLike:\n if isinstance(dtype, DatetimeTZDtype):\n # NB: exclude e.g. 
pyarrow[dt64tz] dtypes\n ts = Timestamp(fill_value).as_unit(dtype.unit)\n i8values = np.full(shape, ts._value)\n dt64values = i8values.view(f"M8[{dtype.unit}]")\n return DatetimeArray._simple_new(dt64values, dtype=dtype)\n\n elif is_1d_only_ea_dtype(dtype):\n dtype = cast(ExtensionDtype, dtype)\n cls = dtype.construct_array_type()\n\n missing_arr = cls._from_sequence([], dtype=dtype)\n ncols, nrows = shape\n assert ncols == 1, ncols\n empty_arr = -1 * np.ones((nrows,), dtype=np.intp)\n return missing_arr.take(empty_arr, allow_fill=True, fill_value=fill_value)\n elif isinstance(dtype, ExtensionDtype):\n # TODO: no tests get here, a handful would if we disabled\n # the dt64tz special-case above (which is faster)\n cls = dtype.construct_array_type()\n missing_arr = cls._empty(shape=shape, dtype=dtype)\n missing_arr[:] = fill_value\n return missing_arr\n else:\n # NB: we should never get here with dtype integer or bool;\n # if we did, the missing_arr.fill would cast to gibberish\n missing_arr = np.empty(shape, dtype=dtype)\n missing_arr.fill(fill_value)\n\n if dtype.kind in "mM":\n missing_arr = ensure_wrapped_if_datetimelike(missing_arr)\n return missing_arr\n | .venv\Lib\site-packages\pandas\core\internals\managers.py | managers.py | Python | 81,576 | 0.75 | 0.181895 | 0.116877 | python-kit | 861 | 2024-03-31T16:24:00.509901 | Apache-2.0 | false | 274f3d08bfb4831987864ccd392168cd |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n NamedTuple,\n)\n\nfrom pandas.core.dtypes.common import is_1d_only_ea_dtype\n\nif TYPE_CHECKING:\n from collections.abc import Iterator\n\n from pandas._libs.internals import BlockPlacement\n from pandas._typing import ArrayLike\n\n from pandas.core.internals.blocks import Block\n from pandas.core.internals.managers import BlockManager\n\n\nclass BlockPairInfo(NamedTuple):\n lvals: ArrayLike\n rvals: ArrayLike\n locs: BlockPlacement\n left_ea: bool\n right_ea: bool\n rblk: Block\n\n\ndef _iter_block_pairs(\n left: BlockManager, right: BlockManager\n) -> Iterator[BlockPairInfo]:\n # At this point we have already checked the parent DataFrames for\n # assert rframe._indexed_same(lframe)\n\n for blk in left.blocks:\n locs = blk.mgr_locs\n blk_vals = blk.values\n\n left_ea = blk_vals.ndim == 1\n\n rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)\n\n # Assertions are disabled for performance, but should hold:\n # if left_ea:\n # assert len(locs) == 1, locs\n # assert len(rblks) == 1, rblks\n # assert rblks[0].shape[0] == 1, rblks[0].shape\n\n for rblk in rblks:\n right_ea = rblk.values.ndim == 1\n\n lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea)\n info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk)\n yield info\n\n\ndef operate_blockwise(\n left: BlockManager, right: BlockManager, array_op\n) -> BlockManager:\n # At this point we have already checked the parent DataFrames for\n # assert rframe._indexed_same(lframe)\n\n res_blks: list[Block] = []\n for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right):\n res_values = array_op(lvals, rvals)\n if (\n left_ea\n and not right_ea\n and hasattr(res_values, "reshape")\n and not is_1d_only_ea_dtype(res_values.dtype)\n ):\n res_values = res_values.reshape(1, -1)\n nbs = rblk._split_op_result(res_values)\n\n # Assertions are disabled for performance, but should hold:\n # if 
right_ea or left_ea:\n # assert len(nbs) == 1\n # else:\n # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape)\n\n _reset_block_mgr_locs(nbs, locs)\n\n res_blks.extend(nbs)\n\n # Assertions are disabled for performance, but should hold:\n # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array}\n # nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks)\n # assert nlocs == len(left.items), (nlocs, len(left.items))\n # assert len(slocs) == nlocs, (len(slocs), nlocs)\n # assert slocs == set(range(nlocs)), slocs\n\n new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False)\n return new_mgr\n\n\ndef _reset_block_mgr_locs(nbs: list[Block], locs) -> None:\n """\n Reset mgr_locs to correspond to our original DataFrame.\n """\n for nb in nbs:\n nblocs = locs[nb.mgr_locs.indexer]\n nb.mgr_locs = nblocs\n # Assertions are disabled for performance, but should hold:\n # assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape)\n # assert all(x in locs.as_array for x in nb.mgr_locs.as_array)\n\n\ndef _get_same_shape_values(\n lblk: Block, rblk: Block, left_ea: bool, right_ea: bool\n) -> tuple[ArrayLike, ArrayLike]:\n """\n Slice lblk.values to align with rblk. 
Squeeze if we have EAs.\n """\n lvals = lblk.values\n rvals = rblk.values\n\n # Require that the indexing into lvals be slice-like\n assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs\n\n # TODO(EA2D): with 2D EAs only this first clause would be needed\n if not (left_ea or right_ea):\n # error: No overload variant of "__getitem__" of "ExtensionArray" matches\n # argument type "Tuple[Union[ndarray, slice], slice]"\n lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload]\n assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)\n elif left_ea and right_ea:\n assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)\n elif right_ea:\n # lvals are 2D, rvals are 1D\n\n # error: No overload variant of "__getitem__" of "ExtensionArray" matches\n # argument type "Tuple[Union[ndarray, slice], slice]"\n lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload]\n assert lvals.shape[0] == 1, lvals.shape\n lvals = lvals[0, :]\n else:\n # lvals are 1D, rvals are 2D\n assert rvals.shape[0] == 1, rvals.shape\n # error: No overload variant of "__getitem__" of "ExtensionArray" matches\n # argument type "Tuple[int, slice]"\n rvals = rvals[0, :] # type: ignore[call-overload]\n\n return lvals, rvals\n\n\ndef blockwise_all(left: BlockManager, right: BlockManager, op) -> bool:\n """\n Blockwise `all` reduction.\n """\n for info in _iter_block_pairs(left, right):\n res = op(info.lvals, info.rvals)\n if not res:\n return False\n return True\n | .venv\Lib\site-packages\pandas\core\internals\ops.py | ops.py | Python | 5,145 | 0.95 | 0.181818 | 0.272727 | awesome-app | 165 | 2024-08-18T12:42:21.170825 | BSD-3-Clause | false | b8df0a4b885a0dd9f5a3a1d691d3eb2d |
from pandas.core.internals.api import make_block # 2023-09-18 pyarrow uses this\nfrom pandas.core.internals.array_manager import (\n ArrayManager,\n SingleArrayManager,\n)\nfrom pandas.core.internals.base import (\n DataManager,\n SingleDataManager,\n)\nfrom pandas.core.internals.concat import concatenate_managers\nfrom pandas.core.internals.managers import (\n BlockManager,\n SingleBlockManager,\n)\n\n__all__ = [\n "Block", # pylint: disable=undefined-all-variable\n "DatetimeTZBlock", # pylint: disable=undefined-all-variable\n "ExtensionBlock", # pylint: disable=undefined-all-variable\n "make_block",\n "DataManager",\n "ArrayManager",\n "BlockManager",\n "SingleDataManager",\n "SingleBlockManager",\n "SingleArrayManager",\n "concatenate_managers",\n]\n\n\ndef __getattr__(name: str):\n # GH#55139\n import warnings\n\n if name == "create_block_manager_from_blocks":\n # GH#33892\n warnings.warn(\n f"{name} is deprecated and will be removed in a future version. "\n "Use public APIs instead.",\n DeprecationWarning,\n # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758\n # on hard-coding stacklevel\n stacklevel=2,\n )\n from pandas.core.internals.managers import create_block_manager_from_blocks\n\n return create_block_manager_from_blocks\n\n if name in [\n "NumericBlock",\n "ObjectBlock",\n "Block",\n "ExtensionBlock",\n "DatetimeTZBlock",\n ]:\n warnings.warn(\n f"{name} is deprecated and will be removed in a future version. 
"\n "Use public APIs instead.",\n DeprecationWarning,\n # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758\n # on hard-coding stacklevel\n stacklevel=2,\n )\n if name == "NumericBlock":\n from pandas.core.internals.blocks import NumericBlock\n\n return NumericBlock\n elif name == "DatetimeTZBlock":\n from pandas.core.internals.blocks import DatetimeTZBlock\n\n return DatetimeTZBlock\n elif name == "ExtensionBlock":\n from pandas.core.internals.blocks import ExtensionBlock\n\n return ExtensionBlock\n elif name == "Block":\n from pandas.core.internals.blocks import Block\n\n return Block\n else:\n from pandas.core.internals.blocks import ObjectBlock\n\n return ObjectBlock\n\n raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'")\n | .venv\Lib\site-packages\pandas\core\internals\__init__.py | __init__.py | Python | 2,615 | 0.95 | 0.047059 | 0.082192 | vue-tools | 468 | 2024-02-07T21:50:35.676054 | Apache-2.0 | false | 5a85adaac33cac405a6502e3e9401279 |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\api.cpython-313.pyc | api.cpython-313.pyc | Other | 4,088 | 0.95 | 0.014925 | 0 | vue-tools | 634 | 2024-05-24T00:03:47.239804 | BSD-3-Clause | false | 6fbaa5dea74c4f8303c6f1a470775025 |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\array_manager.cpython-313.pyc | array_manager.cpython-313.pyc | Other | 53,658 | 0.95 | 0.032907 | 0.012097 | vue-tools | 884 | 2025-06-21T01:18:16.083689 | Apache-2.0 | false | a03b4666afec026772b980372c3f5d46 |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\base.cpython-313.pyc | base.cpython-313.pyc | Other | 14,433 | 0.95 | 0.026738 | 0.005556 | python-kit | 808 | 2024-12-28T16:49:17.119866 | BSD-3-Clause | false | 5b85a8501e7305a0f0d9c8e2b2215e37 |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\blocks.cpython-313.pyc | blocks.cpython-313.pyc | Other | 95,456 | 0.75 | 0.031196 | 0.012346 | node-utils | 715 | 2025-05-06T21:42:29.609751 | MIT | false | d7a08c1a32153db8c6275a987288d12b |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\concat.cpython-313.pyc | concat.cpython-313.pyc | Other | 22,387 | 0.8 | 0.008333 | 0.004505 | node-utils | 317 | 2023-12-20T06:55:45.556697 | MIT | false | c4b998b4d36a4bd0bc77043e0dd0e334 |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\construction.cpython-313.pyc | construction.cpython-313.pyc | Other | 36,361 | 0.95 | 0.028061 | 0.002755 | awesome-app | 421 | 2024-06-19T17:47:55.679918 | MIT | false | 4843d4cc5c74d9ee2f4e7cdea5f59743 |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\managers.cpython-313.pyc | managers.cpython-313.pyc | Other | 90,230 | 0.75 | 0.041111 | 0.00729 | awesome-app | 705 | 2024-02-07T19:42:19.440567 | BSD-3-Clause | false | 2d836d64b2c2c365f60c9c0ecb9484f7 |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\ops.cpython-313.pyc | ops.cpython-313.pyc | Other | 5,066 | 0.8 | 0.015385 | 0 | node-utils | 276 | 2024-12-10T18:39:02.459505 | MIT | false | 27cbfe3e34e0997ae839d6ed761107be |
\n\n | .venv\Lib\site-packages\pandas\core\internals\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,847 | 0.8 | 0 | 0 | awesome-app | 776 | 2024-06-18T02:29:07.835045 | Apache-2.0 | false | a62279f6536aa18944225ba922bd0bd2 |
"""\nModule responsible for execution of NDFrame.describe() method.\n\nMethod NDFrame.describe() delegates actual execution to function describe_ndframe().\n"""\nfrom __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import Timestamp\nfrom pandas._typing import (\n DtypeObj,\n NDFrameT,\n npt,\n)\nfrom pandas.util._validators import validate_percentile\n\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_numeric_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n ArrowDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n)\n\nfrom pandas.core.arrays.floating import Float64Dtype\nfrom pandas.core.reshape.concat import concat\n\nfrom pandas.io.formats.format import format_percentiles\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Sequence,\n )\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\ndef describe_ndframe(\n *,\n obj: NDFrameT,\n include: str | Sequence[str] | None,\n exclude: str | Sequence[str] | None,\n percentiles: Sequence[float] | np.ndarray | None,\n) -> NDFrameT:\n """Describe series or dataframe.\n\n Called from pandas.core.generic.NDFrame.describe()\n\n Parameters\n ----------\n obj: DataFrame or Series\n Either dataframe or series to be described.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored for ``Series``.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored for ``Series``.\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. 
All should fall between 0 and 1.\n The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n\n Returns\n -------\n Dataframe or series description.\n """\n percentiles = _refine_percentiles(percentiles)\n\n describer: NDFrameDescriberAbstract\n if obj.ndim == 1:\n describer = SeriesDescriber(\n obj=cast("Series", obj),\n )\n else:\n describer = DataFrameDescriber(\n obj=cast("DataFrame", obj),\n include=include,\n exclude=exclude,\n )\n\n result = describer.describe(percentiles=percentiles)\n return cast(NDFrameT, result)\n\n\nclass NDFrameDescriberAbstract(ABC):\n """Abstract class for describing dataframe or series.\n\n Parameters\n ----------\n obj : Series or DataFrame\n Object to be described.\n """\n\n def __init__(self, obj: DataFrame | Series) -> None:\n self.obj = obj\n\n @abstractmethod\n def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series:\n """Do describe either series or dataframe.\n\n Parameters\n ----------\n percentiles : list-like of numbers\n The percentiles to include in the output.\n """\n\n\nclass SeriesDescriber(NDFrameDescriberAbstract):\n """Class responsible for creating series description."""\n\n obj: Series\n\n def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series:\n describe_func = select_describe_func(\n self.obj,\n )\n return describe_func(self.obj, percentiles)\n\n\nclass DataFrameDescriber(NDFrameDescriberAbstract):\n """Class responsible for creating dataobj description.\n\n Parameters\n ----------\n obj : DataFrame\n DataFrame to be described.\n include : 'all', list-like of dtypes or None\n A white list of data types to include in the result.\n exclude : list-like of dtypes or None\n A black list of data types to omit from the result.\n """\n\n obj: DataFrame\n\n def __init__(\n self,\n obj: DataFrame,\n *,\n include: str | Sequence[str] | None,\n exclude: str | Sequence[str] | None,\n ) -> None:\n self.include = include\n self.exclude = exclude\n\n 
if obj.ndim == 2 and obj.columns.size == 0:\n raise ValueError("Cannot describe a DataFrame without columns")\n\n super().__init__(obj)\n\n def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame:\n data = self._select_data()\n\n ldesc: list[Series] = []\n for _, series in data.items():\n describe_func = select_describe_func(series)\n ldesc.append(describe_func(series, percentiles))\n\n col_names = reorder_columns(ldesc)\n d = concat(\n [x.reindex(col_names, copy=False) for x in ldesc],\n axis=1,\n sort=False,\n )\n d.columns = data.columns.copy()\n return d\n\n def _select_data(self) -> DataFrame:\n """Select columns to be described."""\n if (self.include is None) and (self.exclude is None):\n # when some numerics are found, keep only numerics\n default_include: list[npt.DTypeLike] = [np.number, "datetime"]\n data = self.obj.select_dtypes(include=default_include)\n if len(data.columns) == 0:\n data = self.obj\n elif self.include == "all":\n if self.exclude is not None:\n msg = "exclude must be None when include is 'all'"\n raise ValueError(msg)\n data = self.obj\n else:\n data = self.obj.select_dtypes(\n include=self.include,\n exclude=self.exclude,\n )\n return data\n\n\ndef reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]:\n """Set a convenient order for rows for display."""\n names: list[Hashable] = []\n seen_names: set[Hashable] = set()\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in seen_names:\n seen_names.add(name)\n names.append(name)\n return names\n\n\ndef describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series:\n """Describe series containing numerical data.\n\n Parameters\n ----------\n series : Series\n Series to be described.\n percentiles : list-like of numbers\n The percentiles to include in the output.\n """\n from pandas import Series\n\n formatted_percentiles = format_percentiles(percentiles)\n\n stat_index = 
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]\n d = (\n [series.count(), series.mean(), series.std(), series.min()]\n + series.quantile(percentiles).tolist()\n + [series.max()]\n )\n # GH#48340 - always return float on non-complex numeric data\n dtype: DtypeObj | None\n if isinstance(series.dtype, ExtensionDtype):\n if isinstance(series.dtype, ArrowDtype):\n if series.dtype.kind == "m":\n # GH53001: describe timedeltas with object dtype\n dtype = None\n else:\n import pyarrow as pa\n\n dtype = ArrowDtype(pa.float64())\n else:\n dtype = Float64Dtype()\n elif series.dtype.kind in "iufb":\n # i.e. numeric but exclude complex dtype\n dtype = np.dtype("float")\n else:\n dtype = None\n return Series(d, index=stat_index, name=series.name, dtype=dtype)\n\n\ndef describe_categorical_1d(\n data: Series,\n percentiles_ignored: Sequence[float],\n) -> Series:\n """Describe series containing categorical data.\n\n Parameters\n ----------\n data : Series\n Series to be described.\n percentiles_ignored : list-like of numbers\n Ignored, but in place to unify interface.\n """\n names = ["count", "unique", "top", "freq"]\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n if count_unique > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n dtype = None\n else:\n # If the DataFrame is empty, set 'top' and 'freq' to None\n # to maintain output shape consistency\n top, freq = np.nan, np.nan\n dtype = "object"\n\n result = [data.count(), count_unique, top, freq]\n\n from pandas import Series\n\n return Series(result, index=names, name=data.name, dtype=dtype)\n\n\ndef describe_timestamp_as_categorical_1d(\n data: Series,\n percentiles_ignored: Sequence[float],\n) -> Series:\n """Describe series containing timestamp data treated as categorical.\n\n Parameters\n ----------\n data : Series\n Series to be described.\n percentiles_ignored : list-like of numbers\n Ignored, but in place to unify interface.\n """\n names = ["count", 
"unique"]\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result: list[float | Timestamp] = [data.count(), count_unique]\n dtype = None\n if count_unique > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n tz = data.dt.tz\n asint = data.dropna().values.view("i8")\n top = Timestamp(top)\n if top.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n top = top.tz_convert(tz)\n else:\n top = top.tz_localize(tz)\n names += ["top", "freq", "first", "last"]\n result += [\n top,\n freq,\n Timestamp(asint.min(), tz=tz),\n Timestamp(asint.max(), tz=tz),\n ]\n\n # If the DataFrame is empty, set 'top' and 'freq' to None\n # to maintain output shape consistency\n else:\n names += ["top", "freq"]\n result += [np.nan, np.nan]\n dtype = "object"\n\n from pandas import Series\n\n return Series(result, index=names, name=data.name, dtype=dtype)\n\n\ndef describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:\n """Describe series containing datetime64 dtype.\n\n Parameters\n ----------\n data : Series\n Series to be described.\n percentiles : list-like of numbers\n The percentiles to include in the output.\n """\n # GH-30164\n from pandas import Series\n\n formatted_percentiles = format_percentiles(percentiles)\n\n stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]\n d = (\n [data.count(), data.mean(), data.min()]\n + data.quantile(percentiles).tolist()\n + [data.max()]\n )\n return Series(d, index=stat_index, name=data.name)\n\n\ndef select_describe_func(\n data: Series,\n) -> Callable:\n """Select proper function for describing series based on data type.\n\n Parameters\n ----------\n data : Series\n Series to be described.\n """\n if is_bool_dtype(data.dtype):\n return describe_categorical_1d\n elif is_numeric_dtype(data):\n return describe_numeric_1d\n elif data.dtype.kind == "M" or isinstance(data.dtype, DatetimeTZDtype):\n return describe_timestamp_1d\n 
elif data.dtype.kind == "m":\n return describe_numeric_1d\n else:\n return describe_categorical_1d\n\n\ndef _refine_percentiles(\n percentiles: Sequence[float] | np.ndarray | None,\n) -> npt.NDArray[np.float64]:\n """\n Ensure that percentiles are unique and sorted.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output.\n """\n if percentiles is None:\n return np.array([0.25, 0.5, 0.75])\n\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n validate_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n\n percentiles = np.asarray(percentiles)\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n assert percentiles is not None\n if len(unique_pcts) < len(percentiles):\n raise ValueError("percentiles cannot contain duplicates")\n\n return unique_pcts\n | .venv\Lib\site-packages\pandas\core\methods\describe.py | describe.py | Python | 11,961 | 0.95 | 0.127404 | 0.046784 | react-lib | 915 | 2023-09-14T21:04:30.927124 | MIT | false | 05e6ae783b864b32291e8f00cad03441 |
"""\nImplementation of nlargest and nsmallest.\n"""\n\nfrom __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Sequence,\n)\nfrom typing import (\n TYPE_CHECKING,\n cast,\n final,\n)\n\nimport numpy as np\n\nfrom pandas._libs import algos as libalgos\n\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_complex_dtype,\n is_integer_dtype,\n is_list_like,\n is_numeric_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.dtypes import BaseMaskedDtype\n\nif TYPE_CHECKING:\n from pandas._typing import (\n DtypeObj,\n IndexLabel,\n )\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\nclass SelectN:\n def __init__(self, obj, n: int, keep: str) -> None:\n self.obj = obj\n self.n = n\n self.keep = keep\n\n if self.keep not in ("first", "last", "all"):\n raise ValueError('keep must be either "first", "last" or "all"')\n\n def compute(self, method: str) -> DataFrame | Series:\n raise NotImplementedError\n\n @final\n def nlargest(self):\n return self.compute("nlargest")\n\n @final\n def nsmallest(self):\n return self.compute("nsmallest")\n\n @final\n @staticmethod\n def is_valid_dtype_n_method(dtype: DtypeObj) -> bool:\n """\n Helper function to determine if dtype is valid for\n nsmallest/nlargest methods\n """\n if is_numeric_dtype(dtype):\n return not is_complex_dtype(dtype)\n return needs_i8_conversion(dtype)\n\n\nclass SelectNSeries(SelectN):\n """\n Implement n largest/smallest for Series\n\n Parameters\n ----------\n obj : Series\n n : int\n keep : {'first', 'last'}, default 'first'\n\n Returns\n -------\n nordered : Series\n """\n\n def compute(self, method: str) -> Series:\n from pandas.core.reshape.concat import concat\n\n n = self.n\n dtype = self.obj.dtype\n if not self.is_valid_dtype_n_method(dtype):\n raise TypeError(f"Cannot use method '{method}' with dtype {dtype}")\n\n if n <= 0:\n return self.obj[[]]\n\n dropped = self.obj.dropna()\n nan_index = self.obj.drop(dropped.index)\n\n # slow method\n if n >= 
len(self.obj):\n ascending = method == "nsmallest"\n return self.obj.sort_values(ascending=ascending).head(n)\n\n # fast method\n new_dtype = dropped.dtype\n\n # Similar to algorithms._ensure_data\n arr = dropped._values\n if needs_i8_conversion(arr.dtype):\n arr = arr.view("i8")\n elif isinstance(arr.dtype, BaseMaskedDtype):\n arr = arr._data\n else:\n arr = np.asarray(arr)\n if arr.dtype.kind == "b":\n arr = arr.view(np.uint8)\n\n if method == "nlargest":\n arr = -arr\n if is_integer_dtype(new_dtype):\n # GH 21426: ensure reverse ordering at boundaries\n arr -= 1\n\n elif is_bool_dtype(new_dtype):\n # GH 26154: ensure False is smaller than True\n arr = 1 - (-arr)\n\n if self.keep == "last":\n arr = arr[::-1]\n\n nbase = n\n narr = len(arr)\n n = min(n, narr)\n\n # arr passed into kth_smallest must be contiguous. We copy\n # here because kth_smallest will modify its input\n # avoid OOB access with kth_smallest_c when n <= 0\n if len(arr) > 0:\n kth_val = libalgos.kth_smallest(arr.copy(order="C"), n - 1)\n else:\n kth_val = np.nan\n (ns,) = np.nonzero(arr <= kth_val)\n inds = ns[arr[ns].argsort(kind="mergesort")]\n\n if self.keep != "all":\n inds = inds[:n]\n findex = nbase\n else:\n if len(inds) < nbase <= len(nan_index) + len(inds):\n findex = len(nan_index) + len(inds)\n else:\n findex = len(inds)\n\n if self.keep == "last":\n # reverse indices\n inds = narr - 1 - inds\n\n return concat([dropped.iloc[inds], nan_index]).iloc[:findex]\n\n\nclass SelectNFrame(SelectN):\n """\n Implement n largest/smallest for DataFrame\n\n Parameters\n ----------\n obj : DataFrame\n n : int\n keep : {'first', 'last'}, default 'first'\n columns : list or str\n\n Returns\n -------\n nordered : DataFrame\n """\n\n def __init__(self, obj: DataFrame, n: int, keep: str, columns: IndexLabel) -> None:\n super().__init__(obj, n, keep)\n if not is_list_like(columns) or isinstance(columns, tuple):\n columns = [columns]\n\n columns = cast(Sequence[Hashable], columns)\n columns = 
list(columns)\n self.columns = columns\n\n def compute(self, method: str) -> DataFrame:\n from pandas.core.api import Index\n\n n = self.n\n frame = self.obj\n columns = self.columns\n\n for column in columns:\n dtype = frame[column].dtype\n if not self.is_valid_dtype_n_method(dtype):\n raise TypeError(\n f"Column {repr(column)} has dtype {dtype}, "\n f"cannot use method {repr(method)} with this dtype"\n )\n\n def get_indexer(current_indexer, other_indexer):\n """\n Helper function to concat `current_indexer` and `other_indexer`\n depending on `method`\n """\n if method == "nsmallest":\n return current_indexer.append(other_indexer)\n else:\n return other_indexer.append(current_indexer)\n\n # Below we save and reset the index in case index contains duplicates\n original_index = frame.index\n cur_frame = frame = frame.reset_index(drop=True)\n cur_n = n\n indexer = Index([], dtype=np.int64)\n\n for i, column in enumerate(columns):\n # For each column we apply method to cur_frame[column].\n # If it's the last column or if we have the number of\n # results desired we are done.\n # Otherwise there are duplicates of the largest/smallest\n # value and we need to look at the rest of the columns\n # to determine which of the rows with the largest/smallest\n # value in the column to keep.\n series = cur_frame[column]\n is_last_column = len(columns) - 1 == i\n values = getattr(series, method)(\n cur_n, keep=self.keep if is_last_column else "all"\n )\n\n if is_last_column or len(values) <= cur_n:\n indexer = get_indexer(indexer, values.index)\n break\n\n # Now find all values which are equal to\n # the (nsmallest: largest)/(nlargest: smallest)\n # from our series.\n border_value = values == values[values.index[-1]]\n\n # Some of these values are among the top-n\n # some aren't.\n unsafe_values = values[border_value]\n\n # These values are definitely among the top-n\n safe_values = values[~border_value]\n indexer = get_indexer(indexer, safe_values.index)\n\n # Go on and separate 
the unsafe_values on the remaining\n # columns.\n cur_frame = cur_frame.loc[unsafe_values.index]\n cur_n = n - len(indexer)\n\n frame = frame.take(indexer)\n\n # Restore the index on frame\n frame.index = original_index.take(indexer)\n\n # If there is only one column, the frame is already sorted.\n if len(columns) == 1:\n return frame\n\n ascending = method == "nsmallest"\n\n return frame.sort_values(columns, ascending=ascending, kind="mergesort")\n | .venv\Lib\site-packages\pandas\core\methods\selectn.py | selectn.py | Python | 7,696 | 0.95 | 0.156134 | 0.126168 | vue-tools | 377 | 2024-07-25T20:54:46.788680 | GPL-3.0 | false | 59f3e501a28580b1dff0302ef745b96e |
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Literal,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n lib,\n missing as libmissing,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.cast import maybe_box_native\nfrom pandas.core.dtypes.dtypes import (\n BaseMaskedDtype,\n ExtensionDtype,\n)\n\nfrom pandas.core import common as com\n\nif TYPE_CHECKING:\n from pandas._typing import MutableMappingT\n\n from pandas import DataFrame\n\n\n@overload\ndef to_dict(\n df: DataFrame,\n orient: Literal["dict", "list", "series", "split", "tight", "index"] = ...,\n *,\n into: type[MutableMappingT] | MutableMappingT,\n index: bool = ...,\n) -> MutableMappingT:\n ...\n\n\n@overload\ndef to_dict(\n df: DataFrame,\n orient: Literal["records"],\n *,\n into: type[MutableMappingT] | MutableMappingT,\n index: bool = ...,\n) -> list[MutableMappingT]:\n ...\n\n\n@overload\ndef to_dict(\n df: DataFrame,\n orient: Literal["dict", "list", "series", "split", "tight", "index"] = ...,\n *,\n into: type[dict] = ...,\n index: bool = ...,\n) -> dict:\n ...\n\n\n@overload\ndef to_dict(\n df: DataFrame,\n orient: Literal["records"],\n *,\n into: type[dict] = ...,\n index: bool = ...,\n) -> list[dict]:\n ...\n\n\n# error: Incompatible default for argument "into" (default has type "type[dict\n# [Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT")\ndef to_dict(\n df: DataFrame,\n orient: Literal[\n "dict", "list", "series", "split", "tight", "records", "index"\n ] = "dict",\n *,\n into: type[MutableMappingT] | MutableMappingT = dict, # type: ignore[assignment]\n index: bool = True,\n) -> MutableMappingT | list[MutableMappingT]:\n """\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'}\n 
Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'tight' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values],\n 'index_names' -> [index.names], 'column_names' -> [column.names]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n .. versionadded:: 1.4.0\n 'tight' as an allowed value for the ``orient`` argument\n\n into : class, default dict\n The collections.abc.MutableMapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n index : bool, default True\n Whether to include the index item (and index_names item if `orient`\n is 'tight') in the returned dictionary. Can only be ``False``\n when `orient` is 'split' or 'tight'.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.MutableMapping object representing the\n DataFrame. 
The resulting transformation depends on the `orient` parameter.\n """\n if not df.columns.is_unique:\n warnings.warn(\n "DataFrame columns are not unique, some columns will be omitted.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n # GH16122\n into_c = com.standardize_mapping(into)\n\n # error: Incompatible types in assignment (expression has type "str",\n # variable has type "Literal['dict', 'list', 'series', 'split', 'tight',\n # 'records', 'index']")\n orient = orient.lower() # type: ignore[assignment]\n\n if not index and orient not in ["split", "tight"]:\n raise ValueError(\n "'index=False' is only valid when 'orient' is 'split' or 'tight'"\n )\n\n if orient == "series":\n # GH46470 Return quickly if orient series to avoid creating dtype objects\n return into_c((k, v) for k, v in df.items())\n\n box_native_indices = [\n i\n for i, col_dtype in enumerate(df.dtypes.values)\n if col_dtype == np.dtype(object) or isinstance(col_dtype, ExtensionDtype)\n ]\n box_na_values = [\n lib.no_default if not isinstance(col_dtype, BaseMaskedDtype) else libmissing.NA\n for i, col_dtype in enumerate(df.dtypes.values)\n ]\n are_all_object_dtype_cols = len(box_native_indices) == len(df.dtypes)\n\n if orient == "dict":\n return into_c((k, v.to_dict(into=into)) for k, v in df.items())\n\n elif orient == "list":\n object_dtype_indices_as_set: set[int] = set(box_native_indices)\n return into_c(\n (\n k,\n list(map(maybe_box_native, v.to_numpy(na_value=box_na_values[i])))\n if i in object_dtype_indices_as_set\n else list(map(maybe_box_native, v.to_numpy())),\n )\n for i, (k, v) in enumerate(df.items())\n )\n\n elif orient == "split":\n data = df._create_data_for_split_and_tight_to_dict(\n are_all_object_dtype_cols, box_native_indices\n )\n\n return into_c(\n ((("index", df.index.tolist()),) if index else ())\n + (\n ("columns", df.columns.tolist()),\n ("data", data),\n )\n )\n\n elif orient == "tight":\n data = df._create_data_for_split_and_tight_to_dict(\n 
are_all_object_dtype_cols, box_native_indices\n )\n\n return into_c(\n ((("index", df.index.tolist()),) if index else ())\n + (\n ("columns", df.columns.tolist()),\n (\n "data",\n [\n list(map(maybe_box_native, t))\n for t in df.itertuples(index=False, name=None)\n ],\n ),\n )\n + ((("index_names", list(df.index.names)),) if index else ())\n + (("column_names", list(df.columns.names)),)\n )\n\n elif orient == "records":\n columns = df.columns.tolist()\n if are_all_object_dtype_cols:\n rows = (\n dict(zip(columns, row)) for row in df.itertuples(index=False, name=None)\n )\n return [\n into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows\n ]\n else:\n data = [\n into_c(zip(columns, t)) for t in df.itertuples(index=False, name=None)\n ]\n if box_native_indices:\n object_dtype_indices_as_set = set(box_native_indices)\n object_dtype_cols = {\n col\n for i, col in enumerate(df.columns)\n if i in object_dtype_indices_as_set\n }\n for row in data:\n for col in object_dtype_cols:\n row[col] = maybe_box_native(row[col])\n return data\n\n elif orient == "index":\n if not df.index.is_unique:\n raise ValueError("DataFrame index must be unique for orient='index'.")\n columns = df.columns.tolist()\n if are_all_object_dtype_cols:\n return into_c(\n (t[0], dict(zip(df.columns, map(maybe_box_native, t[1:]))))\n for t in df.itertuples(name=None)\n )\n elif box_native_indices:\n object_dtype_indices_as_set = set(box_native_indices)\n is_object_dtype_by_index = [\n i in object_dtype_indices_as_set for i in range(len(df.columns))\n ]\n return into_c(\n (\n t[0],\n {\n columns[i]: maybe_box_native(v)\n if is_object_dtype_by_index[i]\n else v\n for i, v in enumerate(t[1:])\n },\n )\n for t in df.itertuples(name=None)\n )\n else:\n return into_c(\n (t[0], dict(zip(df.columns, t[1:]))) for t in df.itertuples(name=None)\n )\n\n else:\n raise ValueError(f"orient '{orient}' not understood")\n | .venv\Lib\site-packages\pandas\core\methods\to_dict.py | to_dict.py | Python | 
8,649 | 0.95 | 0.176471 | 0.051282 | python-kit | 857 | 2025-04-11T17:38:27.496104 | BSD-3-Clause | false | fe8891c7c57ace29917fc6f4b5e36553 |
\n\n | .venv\Lib\site-packages\pandas\core\methods\__pycache__\describe.cpython-313.pyc | describe.cpython-313.pyc | Other | 14,905 | 0.95 | 0.057416 | 0 | vue-tools | 273 | 2023-09-14T23:27:42.996222 | Apache-2.0 | false | b6c401413b34df985f4336fd5e7654a6 |
\n\n | .venv\Lib\site-packages\pandas\core\methods\__pycache__\selectn.cpython-313.pyc | selectn.cpython-313.pyc | Other | 9,308 | 0.95 | 0.058824 | 0 | node-utils | 770 | 2024-08-13T13:05:07.690969 | MIT | false | 206c12657e4d62872ef1d8b9067eb1f0 |
\n\n | .venv\Lib\site-packages\pandas\core\methods\__pycache__\to_dict.cpython-313.pyc | to_dict.cpython-313.pyc | Other | 11,404 | 0.95 | 0.035294 | 0.006494 | node-utils | 484 | 2025-03-19T06:33:57.391942 | Apache-2.0 | false | 2452943235b3579d582ffb09a9336fee |
\n\n | .venv\Lib\site-packages\pandas\core\methods\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 194 | 0.7 | 0 | 0 | awesome-app | 89 | 2024-07-27T15:46:16.911673 | Apache-2.0 | false | 924e89f68ec70859b8598f202e9e0003 |
"""\nFunctions for arithmetic and comparison operations on NumPy arrays and\nExtensionArrays.\n"""\nfrom __future__ import annotations\n\nimport datetime\nfrom functools import partial\nimport operator\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n Timedelta,\n Timestamp,\n lib,\n ops as libops,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n get_supported_dtype,\n is_supported_dtype,\n is_unitless,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.cast import (\n construct_1d_object_array_from_listlike,\n find_common_type,\n)\nfrom pandas.core.dtypes.common import (\n ensure_object,\n is_bool_dtype,\n is_list_like,\n is_numeric_v_string_like,\n is_object_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.generic import (\n ABCExtensionArray,\n ABCIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n notna,\n)\n\nfrom pandas.core import roperator\nfrom pandas.core.computation import expressions\nfrom pandas.core.construction import ensure_wrapped_if_datetimelike\nfrom pandas.core.ops import missing\nfrom pandas.core.ops.dispatch import should_extension_dispatch\nfrom pandas.core.ops.invalid import invalid_comparison\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n Shape,\n )\n\n# -----------------------------------------------------------------------------\n# Masking NA values and fallbacks for operations numpy does not support\n\n\ndef fill_binop(left, right, fill_value):\n """\n If a non-None fill_value is given, replace null entries in left and right\n with this value, but only in positions where _one_ of left/right is null,\n not both.\n\n Parameters\n ----------\n left : array-like\n right : array-like\n fill_value : object\n\n Returns\n -------\n left : array-like\n right : array-like\n\n Notes\n -----\n Makes copies if fill_value is not None and NAs are present.\n """\n if fill_value is not None:\n 
left_mask = isna(left)\n right_mask = isna(right)\n\n # one but not both\n mask = left_mask ^ right_mask\n\n if left_mask.any():\n # Avoid making a copy if we can\n left = left.copy()\n left[left_mask & mask] = fill_value\n\n if right_mask.any():\n # Avoid making a copy if we can\n right = right.copy()\n right[right_mask & mask] = fill_value\n\n return left, right\n\n\ndef comp_method_OBJECT_ARRAY(op, x, y):\n if isinstance(y, list):\n # e.g. test_tuple_categories\n y = construct_1d_object_array_from_listlike(y)\n\n if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):\n if not is_object_dtype(y.dtype):\n y = y.astype(np.object_)\n\n if isinstance(y, (ABCSeries, ABCIndex)):\n y = y._values\n\n if x.shape != y.shape:\n raise ValueError("Shapes must match", x.shape, y.shape)\n result = libops.vec_compare(x.ravel(), y.ravel(), op)\n else:\n result = libops.scalar_compare(x.ravel(), y, op)\n return result.reshape(x.shape)\n\n\ndef _masked_arith_op(x: np.ndarray, y, op):\n """\n If the given arithmetic operation fails, attempt it again on\n only the non-null elements of the input array(s).\n\n Parameters\n ----------\n x : np.ndarray\n y : np.ndarray, Series, Index\n op : binary operator\n """\n # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes\n # the logic valid for both Series and DataFrame ops.\n xrav = x.ravel()\n\n if isinstance(y, np.ndarray):\n dtype = find_common_type([x.dtype, y.dtype])\n result = np.empty(x.size, dtype=dtype)\n\n if len(x) != len(y):\n raise ValueError(x.shape, y.shape)\n ymask = notna(y)\n\n # NB: ravel() is only safe since y is ndarray; for e.g. 
PeriodIndex\n # we would get int64 dtype, see GH#19956\n yrav = y.ravel()\n mask = notna(xrav) & ymask.ravel()\n\n # See GH#5284, GH#5035, GH#19448 for historical reference\n if mask.any():\n result[mask] = op(xrav[mask], yrav[mask])\n\n else:\n if not is_scalar(y):\n raise TypeError(\n f"Cannot broadcast np.ndarray with operand of type { type(y) }"\n )\n\n # mask is only meaningful for x\n result = np.empty(x.size, dtype=x.dtype)\n mask = notna(xrav)\n\n # 1 ** np.nan is 1. So we have to unmask those.\n if op is pow:\n mask = np.where(x == 1, False, mask)\n elif op is roperator.rpow:\n mask = np.where(y == 1, False, mask)\n\n if mask.any():\n result[mask] = op(xrav[mask], y)\n\n np.putmask(result, ~mask, np.nan)\n result = result.reshape(x.shape) # 2D compat\n return result\n\n\ndef _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False):\n """\n Return the result of evaluating op on the passed in values.\n\n If native types are not compatible, try coercion to object dtype.\n\n Parameters\n ----------\n left : np.ndarray\n right : np.ndarray or scalar\n Excludes DataFrame, Series, Index, ExtensionArray.\n is_cmp : bool, default False\n If this a comparison operation.\n\n Returns\n -------\n array-like\n\n Raises\n ------\n TypeError : invalid operation\n """\n if isinstance(right, str):\n # can never use numexpr\n func = op\n else:\n func = partial(expressions.evaluate, op)\n\n try:\n result = func(left, right)\n except TypeError:\n if not is_cmp and (\n left.dtype == object or getattr(right, "dtype", None) == object\n ):\n # For object dtype, fallback to a masked operation (only operating\n # on the non-missing values)\n # Don't do this for comparisons, as that will handle complex numbers\n # incorrectly, see GH#32047\n result = _masked_arith_op(left, right, op)\n else:\n raise\n\n if is_cmp and (is_scalar(result) or result is NotImplemented):\n # numpy returned a scalar instead of operating element-wise\n # e.g. 
numeric array vs str\n # TODO: can remove this after dropping some future numpy version?\n return invalid_comparison(left, right, op)\n\n return missing.dispatch_fill_zeros(op, left, right, result)\n\n\ndef arithmetic_op(left: ArrayLike, right: Any, op):\n """\n Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...\n\n Note: the caller is responsible for ensuring that numpy warnings are\n suppressed (with np.errstate(all="ignore")) if needed.\n\n Parameters\n ----------\n left : np.ndarray or ExtensionArray\n right : object\n Cannot be a DataFrame or Index. Series is *not* excluded.\n op : {operator.add, operator.sub, ...}\n Or one of the reversed variants from roperator.\n\n Returns\n -------\n ndarray or ExtensionArray\n Or a 2-tuple of these in the case of divmod or rdivmod.\n """\n # NB: We assume that extract_array and ensure_wrapped_if_datetimelike\n # have already been called on `left` and `right`,\n # and `maybe_prepare_scalar_for_op` has already been called on `right`\n # We need to special-case datetime64/timedelta64 dtypes (e.g. 
because numpy\n # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390)\n\n if (\n should_extension_dispatch(left, right)\n or isinstance(right, (Timedelta, BaseOffset, Timestamp))\n or right is NaT\n ):\n # Timedelta/Timestamp and other custom scalars are included in the check\n # because numexpr will fail on it, see GH#31457\n res_values = op(left, right)\n else:\n # TODO we should handle EAs consistently and move this check before the if/else\n # (https://github.com/pandas-dev/pandas/issues/41165)\n # error: Argument 2 to "_bool_arith_check" has incompatible type\n # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]"\n _bool_arith_check(op, left, right) # type: ignore[arg-type]\n\n # error: Argument 1 to "_na_arithmetic_op" has incompatible type\n # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]"\n res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type]\n\n return res_values\n\n\ndef comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:\n """\n Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`.\n\n Note: the caller is responsible for ensuring that numpy warnings are\n suppressed (with np.errstate(all="ignore")) if needed.\n\n Parameters\n ----------\n left : np.ndarray or ExtensionArray\n right : object\n Cannot be a DataFrame, Series, or Index.\n op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le}\n\n Returns\n -------\n ndarray or ExtensionArray\n """\n # NB: We assume extract_array has already been called on left and right\n lvalues = ensure_wrapped_if_datetimelike(left)\n rvalues = ensure_wrapped_if_datetimelike(right)\n\n rvalues = lib.item_from_zerodim(rvalues)\n if isinstance(rvalues, list):\n # We don't catch tuple here bc we may be comparing e.g. 
MultiIndex\n # to a tuple that represents a single entry, see test_compare_tuple_strs\n rvalues = np.asarray(rvalues)\n\n if isinstance(rvalues, (np.ndarray, ABCExtensionArray)):\n # TODO: make this treatment consistent across ops and classes.\n # We are not catching all listlikes here (e.g. frozenset, tuple)\n # The ambiguous case is object-dtype. See GH#27803\n if len(lvalues) != len(rvalues):\n raise ValueError(\n "Lengths must match to compare", lvalues.shape, rvalues.shape\n )\n\n if should_extension_dispatch(lvalues, rvalues) or (\n (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT)\n and lvalues.dtype != object\n ):\n # Call the method on lvalues\n res_values = op(lvalues, rvalues)\n\n elif is_scalar(rvalues) and isna(rvalues): # TODO: but not pd.NA?\n # numpy does not like comparisons vs None\n if op is operator.ne:\n res_values = np.ones(lvalues.shape, dtype=bool)\n else:\n res_values = np.zeros(lvalues.shape, dtype=bool)\n\n elif is_numeric_v_string_like(lvalues, rvalues):\n # GH#36377 going through the numexpr path would incorrectly raise\n return invalid_comparison(lvalues, rvalues, op)\n\n elif lvalues.dtype == object or isinstance(rvalues, str):\n res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)\n\n else:\n res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True)\n\n return res_values\n\n\ndef na_logical_op(x: np.ndarray, y, op):\n try:\n # For exposition, write:\n # yarr = isinstance(y, np.ndarray)\n # yint = is_integer(y) or (yarr and y.dtype.kind == "i")\n # ybool = is_bool(y) or (yarr and y.dtype.kind == "b")\n # xint = x.dtype.kind == "i"\n # xbool = x.dtype.kind == "b"\n # Then Cases where this goes through without raising include:\n # (xint or xbool) and (yint or bool)\n result = op(x, y)\n except TypeError:\n if isinstance(y, np.ndarray):\n # bool-bool dtype operations should be OK, should not get here\n assert not (x.dtype.kind == "b" and y.dtype.kind == "b")\n x = ensure_object(x)\n y = 
ensure_object(y)\n result = libops.vec_binop(x.ravel(), y.ravel(), op)\n else:\n # let null fall thru\n assert lib.is_scalar(y)\n if not isna(y):\n y = bool(y)\n try:\n result = libops.scalar_binop(x, y, op)\n except (\n TypeError,\n ValueError,\n AttributeError,\n OverflowError,\n NotImplementedError,\n ) as err:\n typ = type(y).__name__\n raise TypeError(\n f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array "\n f"and scalar of type [{typ}]"\n ) from err\n\n return result.reshape(x.shape)\n\n\ndef logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:\n """\n Evaluate a logical operation `|`, `&`, or `^`.\n\n Parameters\n ----------\n left : np.ndarray or ExtensionArray\n right : object\n Cannot be a DataFrame, Series, or Index.\n op : {operator.and_, operator.or_, operator.xor}\n Or one of the reversed variants from roperator.\n\n Returns\n -------\n ndarray or ExtensionArray\n """\n\n def fill_bool(x, left=None):\n # if `left` is specifically not-boolean, we do not cast to bool\n if x.dtype.kind in "cfO":\n # dtypes that can hold NA\n mask = isna(x)\n if mask.any():\n x = x.astype(object)\n x[mask] = False\n\n if left is None or left.dtype.kind == "b":\n x = x.astype(bool)\n return x\n\n right = lib.item_from_zerodim(right)\n if is_list_like(right) and not hasattr(right, "dtype"):\n # e.g. list, tuple\n warnings.warn(\n "Logical ops (and, or, xor) between Pandas objects and dtype-less "\n "sequences (e.g. list, tuple) are deprecated and will raise in a "\n "future version. 
Wrap the object in a Series, Index, or np.array "\n "before operating instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n right = construct_1d_object_array_from_listlike(right)\n\n # NB: We assume extract_array has already been called on left and right\n lvalues = ensure_wrapped_if_datetimelike(left)\n rvalues = right\n\n if should_extension_dispatch(lvalues, rvalues):\n # Call the method on lvalues\n res_values = op(lvalues, rvalues)\n\n else:\n if isinstance(rvalues, np.ndarray):\n is_other_int_dtype = rvalues.dtype.kind in "iu"\n if not is_other_int_dtype:\n rvalues = fill_bool(rvalues, lvalues)\n\n else:\n # i.e. scalar\n is_other_int_dtype = lib.is_integer(rvalues)\n\n res_values = na_logical_op(lvalues, rvalues, op)\n\n # For int vs int `^`, `|`, `&` are bitwise operators and return\n # integer dtypes. Otherwise these are boolean ops\n if not (left.dtype.kind in "iu" and is_other_int_dtype):\n res_values = fill_bool(res_values)\n\n return res_values\n\n\ndef get_array_op(op):\n """\n Return a binary array operation corresponding to the given operator op.\n\n Parameters\n ----------\n op : function\n Binary operator from operator or roperator module.\n\n Returns\n -------\n functools.partial\n """\n if isinstance(op, partial):\n # We get here via dispatch_to_series in DataFrame case\n # e.g. test_rolling_consistency_var_debiasing_factors\n return op\n\n op_name = op.__name__.strip("_").lstrip("r")\n if op_name == "arith_op":\n # Reached via DataFrame._combine_frame i.e. flex methods\n # e.g. 
test_df_add_flex_filled_mixed_dtypes\n return op\n\n if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}:\n return partial(comparison_op, op=op)\n elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}:\n return partial(logical_op, op=op)\n elif op_name in {\n "add",\n "sub",\n "mul",\n "truediv",\n "floordiv",\n "mod",\n "divmod",\n "pow",\n }:\n return partial(arithmetic_op, op=op)\n else:\n raise NotImplementedError(op_name)\n\n\ndef maybe_prepare_scalar_for_op(obj, shape: Shape):\n """\n Cast non-pandas objects to pandas types to unify behavior of arithmetic\n and comparison operations.\n\n Parameters\n ----------\n obj: object\n shape : tuple[int]\n\n Returns\n -------\n out : object\n\n Notes\n -----\n Be careful to call this *after* determining the `name` attribute to be\n attached to the result of the arithmetic operation.\n """\n if type(obj) is datetime.timedelta:\n # GH#22390 cast up to Timedelta to rely on Timedelta\n # implementation; otherwise operation against numeric-dtype\n # raises TypeError\n return Timedelta(obj)\n elif type(obj) is datetime.datetime:\n # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above\n return Timestamp(obj)\n elif isinstance(obj, np.datetime64):\n # GH#28080 numpy casts integer-dtype to datetime64 when doing\n # array[int] + datetime64, which we do not allow\n if isna(obj):\n from pandas.core.arrays import DatetimeArray\n\n # Avoid possible ambiguities with pd.NaT\n # GH 52295\n if is_unitless(obj.dtype):\n obj = obj.astype("datetime64[ns]")\n elif not is_supported_dtype(obj.dtype):\n new_dtype = get_supported_dtype(obj.dtype)\n obj = obj.astype(new_dtype)\n right = np.broadcast_to(obj, shape)\n return DatetimeArray._simple_new(right, dtype=right.dtype)\n\n return Timestamp(obj)\n\n elif isinstance(obj, np.timedelta64):\n if isna(obj):\n from pandas.core.arrays import TimedeltaArray\n\n # wrapping timedelta64("NaT") in Timedelta returns NaT,\n # which would incorrectly be treated as a 
datetime-NaT, so\n # we broadcast and wrap in a TimedeltaArray\n # GH 52295\n if is_unitless(obj.dtype):\n obj = obj.astype("timedelta64[ns]")\n elif not is_supported_dtype(obj.dtype):\n new_dtype = get_supported_dtype(obj.dtype)\n obj = obj.astype(new_dtype)\n right = np.broadcast_to(obj, shape)\n return TimedeltaArray._simple_new(right, dtype=right.dtype)\n\n # In particular non-nanosecond timedelta64 needs to be cast to\n # nanoseconds, or else we get undesired behavior like\n # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')\n return Timedelta(obj)\n\n # We want NumPy numeric scalars to behave like Python scalars\n # post NEP 50\n elif isinstance(obj, np.integer):\n return int(obj)\n\n elif isinstance(obj, np.floating):\n return float(obj)\n\n return obj\n\n\n_BOOL_OP_NOT_ALLOWED = {\n operator.truediv,\n roperator.rtruediv,\n operator.floordiv,\n roperator.rfloordiv,\n operator.pow,\n roperator.rpow,\n}\n\n\ndef _bool_arith_check(op, a: np.ndarray, b):\n """\n In contrast to numpy, pandas raises an error for certain operations\n with booleans.\n """\n if op in _BOOL_OP_NOT_ALLOWED:\n if a.dtype.kind == "b" and (is_bool_dtype(b) or lib.is_bool(b)):\n op_name = op.__name__.strip("_").lstrip("r")\n raise NotImplementedError(\n f"operator '{op_name}' not implemented for bool dtypes"\n )\n | .venv\Lib\site-packages\pandas\core\ops\array_ops.py | array_ops.py | Python | 19,079 | 0.95 | 0.13245 | 0.163673 | node-utils | 378 | 2024-10-22T05:30:57.871286 | MIT | false | d3499991b862b7998e8329d1be00b81b |
"""\nBoilerplate functions used in defining binary operations.\n"""\nfrom __future__ import annotations\n\nfrom functools import wraps\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n)\n\nfrom pandas._libs.lib import item_from_zerodim\nfrom pandas._libs.missing import is_matching_na\n\nfrom pandas.core.dtypes.generic import (\n ABCIndex,\n ABCSeries,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import F\n\n\ndef unpack_zerodim_and_defer(name: str) -> Callable[[F], F]:\n """\n Boilerplate for pandas conventions in arithmetic and comparison methods.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n decorator\n """\n\n def wrapper(method: F) -> F:\n return _unpack_zerodim_and_defer(method, name)\n\n return wrapper\n\n\ndef _unpack_zerodim_and_defer(method, name: str):\n """\n Boilerplate for pandas conventions in arithmetic and comparison methods.\n\n Ensure method returns NotImplemented when operating against "senior"\n classes. Ensure zero-dimensional ndarrays are always unpacked.\n\n Parameters\n ----------\n method : binary method\n name : str\n\n Returns\n -------\n method\n """\n stripped_name = name.removeprefix("__").removesuffix("__")\n is_cmp = stripped_name in {"eq", "ne", "lt", "le", "gt", "ge"}\n\n @wraps(method)\n def new_method(self, other):\n if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries):\n # For comparison ops, Index does *not* defer to Series\n pass\n else:\n prio = getattr(other, "__pandas_priority__", None)\n if prio is not None:\n if prio > self.__pandas_priority__:\n # e.g. other is DataFrame while self is Index/Series/EA\n return NotImplemented\n\n other = item_from_zerodim(other)\n\n return method(self, other)\n\n return new_method\n\n\ndef get_op_result_name(left, right):\n """\n Find the appropriate name to pin to an operation result. 
This result\n should always be either an Index or a Series.\n\n Parameters\n ----------\n left : {Series, Index}\n right : object\n\n Returns\n -------\n name : object\n Usually a string\n """\n if isinstance(right, (ABCSeries, ABCIndex)):\n name = _maybe_match_name(left, right)\n else:\n name = left.name\n return name\n\n\ndef _maybe_match_name(a, b):\n """\n Try to find a name to attach to the result of an operation between\n a and b. If only one of these has a `name` attribute, return that\n name. Otherwise return a consensus name if they match or None if\n they have different names.\n\n Parameters\n ----------\n a : object\n b : object\n\n Returns\n -------\n name : str or None\n\n See Also\n --------\n pandas.core.common.consensus_name_attr\n """\n a_has = hasattr(a, "name")\n b_has = hasattr(b, "name")\n if a_has and b_has:\n try:\n if a.name == b.name:\n return a.name\n elif is_matching_na(a.name, b.name):\n # e.g. both are np.nan\n return a.name\n else:\n return None\n except TypeError:\n # pd.NA\n if is_matching_na(a.name, b.name):\n return a.name\n return None\n except ValueError:\n # e.g. np.int64(1) vs (np.int64(1), np.int64(2))\n return None\n elif a_has:\n return a.name\n elif b_has:\n return b.name\n return None\n | .venv\Lib\site-packages\pandas\core\ops\common.py | common.py | Python | 3,500 | 0.95 | 0.136986 | 0.042373 | python-kit | 353 | 2023-07-31T01:04:17.590644 | BSD-3-Clause | false | 461adeefa8daa12766a7e3f063ac96f5 |
"""\nFunctions for defining unary operations.\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n)\n\nfrom pandas.core.dtypes.generic import ABCExtensionArray\n\nif TYPE_CHECKING:\n from pandas._typing import ArrayLike\n\n\ndef should_extension_dispatch(left: ArrayLike, right: Any) -> bool:\n """\n Identify cases where Series operation should dispatch to ExtensionArray method.\n\n Parameters\n ----------\n left : np.ndarray or ExtensionArray\n right : object\n\n Returns\n -------\n bool\n """\n return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray)\n | .venv\Lib\site-packages\pandas\core\ops\dispatch.py | dispatch.py | Python | 635 | 0.85 | 0.1 | 0 | awesome-app | 835 | 2023-09-26T12:26:23.373435 | Apache-2.0 | false | de2da8e8eb8d784f49623121c1b98a2b |
"""\nTemplating for ops docstrings\n"""\nfrom __future__ import annotations\n\n\ndef make_flex_doc(op_name: str, typ: str) -> str:\n """\n Make the appropriate substitutions for the given operation and class-typ\n into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring\n to attach to a generated method.\n\n Parameters\n ----------\n op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}\n typ : str {series, 'dataframe']}\n\n Returns\n -------\n doc : str\n """\n op_name = op_name.replace("__", "")\n op_desc = _op_descriptions[op_name]\n\n op_desc_op = op_desc["op"]\n assert op_desc_op is not None # for mypy\n if op_name.startswith("r"):\n equiv = f"other {op_desc_op} {typ}"\n elif op_name == "divmod":\n equiv = f"{op_name}({typ}, other)"\n else:\n equiv = f"{typ} {op_desc_op} other"\n\n if typ == "series":\n base_doc = _flex_doc_SERIES\n if op_desc["reverse"]:\n base_doc += _see_also_reverse_SERIES.format(\n reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"]\n )\n doc_no_examples = base_doc.format(\n desc=op_desc["desc"],\n op_name=op_name,\n equiv=equiv,\n series_returns=op_desc["series_returns"],\n )\n ser_example = op_desc["series_examples"]\n if ser_example:\n doc = doc_no_examples + ser_example\n else:\n doc = doc_no_examples\n elif typ == "dataframe":\n if op_name in ["eq", "ne", "le", "lt", "ge", "gt"]:\n base_doc = _flex_comp_doc_FRAME\n doc = _flex_comp_doc_FRAME.format(\n op_name=op_name,\n desc=op_desc["desc"],\n )\n else:\n base_doc = _flex_doc_FRAME\n doc = base_doc.format(\n desc=op_desc["desc"],\n op_name=op_name,\n equiv=equiv,\n reverse=op_desc["reverse"],\n )\n else:\n raise AssertionError("Invalid typ argument.")\n return doc\n\n\n_common_examples_algebra_SERIES = """\nExamples\n--------\n>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])\n>>> a\na 1.0\nb 1.0\nc 1.0\nd NaN\ndtype: float64\n>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])\n>>> b\na 1.0\nb NaN\nd 1.0\ne 
NaN\ndtype: float64"""\n\n_common_examples_comparison_SERIES = """\nExamples\n--------\n>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e'])\n>>> a\na 1.0\nb 1.0\nc 1.0\nd NaN\ne 1.0\ndtype: float64\n>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f'])\n>>> b\na 0.0\nb 1.0\nc 2.0\nd NaN\nf 1.0\ndtype: float64"""\n\n_add_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.add(b, fill_value=0)\na 2.0\nb 1.0\nc 1.0\nd 1.0\ne NaN\ndtype: float64\n"""\n)\n\n_sub_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.subtract(b, fill_value=0)\na 0.0\nb 1.0\nc 1.0\nd -1.0\ne NaN\ndtype: float64\n"""\n)\n\n_mul_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.multiply(b, fill_value=0)\na 1.0\nb 0.0\nc 0.0\nd 0.0\ne NaN\ndtype: float64\n"""\n)\n\n_div_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.divide(b, fill_value=0)\na 1.0\nb inf\nc inf\nd 0.0\ne NaN\ndtype: float64\n"""\n)\n\n_floordiv_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.floordiv(b, fill_value=0)\na 1.0\nb inf\nc inf\nd 0.0\ne NaN\ndtype: float64\n"""\n)\n\n_divmod_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.divmod(b, fill_value=0)\n(a 1.0\n b inf\n c inf\n d 0.0\n e NaN\n dtype: float64,\n a 0.0\n b NaN\n c NaN\n d 0.0\n e NaN\n dtype: float64)\n"""\n)\n\n_mod_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.mod(b, fill_value=0)\na 0.0\nb NaN\nc NaN\nd 0.0\ne NaN\ndtype: float64\n"""\n)\n_pow_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.pow(b, fill_value=0)\na 1.0\nb 1.0\nc 1.0\nd 0.0\ne NaN\ndtype: float64\n"""\n)\n\n_ne_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.ne(b, fill_value=0)\na False\nb True\nc True\nd True\ne True\ndtype: bool\n"""\n)\n\n_eq_example_SERIES = (\n _common_examples_algebra_SERIES\n + """\n>>> a.eq(b, fill_value=0)\na True\nb False\nc False\nd False\ne False\ndtype: 
bool\n"""\n)\n\n_lt_example_SERIES = (\n _common_examples_comparison_SERIES\n + """\n>>> a.lt(b, fill_value=0)\na False\nb False\nc True\nd False\ne False\nf True\ndtype: bool\n"""\n)\n\n_le_example_SERIES = (\n _common_examples_comparison_SERIES\n + """\n>>> a.le(b, fill_value=0)\na False\nb True\nc True\nd False\ne False\nf True\ndtype: bool\n"""\n)\n\n_gt_example_SERIES = (\n _common_examples_comparison_SERIES\n + """\n>>> a.gt(b, fill_value=0)\na True\nb False\nc False\nd False\ne True\nf False\ndtype: bool\n"""\n)\n\n_ge_example_SERIES = (\n _common_examples_comparison_SERIES\n + """\n>>> a.ge(b, fill_value=0)\na True\nb True\nc False\nd False\ne True\nf False\ndtype: bool\n"""\n)\n\n_returns_series = """Series\n The result of the operation."""\n\n_returns_tuple = """2-Tuple of Series\n The result of the operation."""\n\n_op_descriptions: dict[str, dict[str, str | None]] = {\n # Arithmetic Operators\n "add": {\n "op": "+",\n "desc": "Addition",\n "reverse": "radd",\n "series_examples": _add_example_SERIES,\n "series_returns": _returns_series,\n },\n "sub": {\n "op": "-",\n "desc": "Subtraction",\n "reverse": "rsub",\n "series_examples": _sub_example_SERIES,\n "series_returns": _returns_series,\n },\n "mul": {\n "op": "*",\n "desc": "Multiplication",\n "reverse": "rmul",\n "series_examples": _mul_example_SERIES,\n "series_returns": _returns_series,\n "df_examples": None,\n },\n "mod": {\n "op": "%",\n "desc": "Modulo",\n "reverse": "rmod",\n "series_examples": _mod_example_SERIES,\n "series_returns": _returns_series,\n },\n "pow": {\n "op": "**",\n "desc": "Exponential power",\n "reverse": "rpow",\n "series_examples": _pow_example_SERIES,\n "series_returns": _returns_series,\n "df_examples": None,\n },\n "truediv": {\n "op": "/",\n "desc": "Floating division",\n "reverse": "rtruediv",\n "series_examples": _div_example_SERIES,\n "series_returns": _returns_series,\n "df_examples": None,\n },\n "floordiv": {\n "op": "//",\n "desc": "Integer division",\n "reverse": 
"rfloordiv",\n "series_examples": _floordiv_example_SERIES,\n "series_returns": _returns_series,\n "df_examples": None,\n },\n "divmod": {\n "op": "divmod",\n "desc": "Integer division and modulo",\n "reverse": "rdivmod",\n "series_examples": _divmod_example_SERIES,\n "series_returns": _returns_tuple,\n "df_examples": None,\n },\n # Comparison Operators\n "eq": {\n "op": "==",\n "desc": "Equal to",\n "reverse": None,\n "series_examples": _eq_example_SERIES,\n "series_returns": _returns_series,\n },\n "ne": {\n "op": "!=",\n "desc": "Not equal to",\n "reverse": None,\n "series_examples": _ne_example_SERIES,\n "series_returns": _returns_series,\n },\n "lt": {\n "op": "<",\n "desc": "Less than",\n "reverse": None,\n "series_examples": _lt_example_SERIES,\n "series_returns": _returns_series,\n },\n "le": {\n "op": "<=",\n "desc": "Less than or equal to",\n "reverse": None,\n "series_examples": _le_example_SERIES,\n "series_returns": _returns_series,\n },\n "gt": {\n "op": ">",\n "desc": "Greater than",\n "reverse": None,\n "series_examples": _gt_example_SERIES,\n "series_returns": _returns_series,\n },\n "ge": {\n "op": ">=",\n "desc": "Greater than or equal to",\n "reverse": None,\n "series_examples": _ge_example_SERIES,\n "series_returns": _returns_series,\n },\n}\n\n_py_num_ref = """see\n `Python documentation\n <https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_\n for more details"""\n_op_names = list(_op_descriptions.keys())\nfor key in _op_names:\n reverse_op = _op_descriptions[key]["reverse"]\n if reverse_op is not None:\n _op_descriptions[reverse_op] = _op_descriptions[key].copy()\n _op_descriptions[reverse_op]["reverse"] = key\n _op_descriptions[key][\n "see_also_desc"\n ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}"\n _op_descriptions[reverse_op][\n "see_also_desc"\n ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}"\n\n_flex_doc_SERIES = """\nReturn {desc} of series and other, 
element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``, but with support to substitute a fill_value for\nmissing data in either one of the inputs.\n\nParameters\n----------\nother : Series or scalar value\nlevel : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\nfill_value : None or float value, default None (NaN)\n Fill existing missing (NaN) values, and any new element needed for\n successful Series alignment, with this value before computation.\n If data in both corresponding Series locations is missing\n the result of filling (at that location) will be missing.\naxis : {{0 or 'index'}}\n Unused. Parameter needed for compatibility with DataFrame.\n\nReturns\n-------\n{series_returns}\n"""\n\n_see_also_reverse_SERIES = """\nSee Also\n--------\nSeries.{reverse} : {see_also_desc}.\n"""\n\n_flex_doc_FRAME = """\nGet {desc} of dataframe and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``, but with support to substitute a fill_value\nfor missing data in one of the inputs. With reverse version, `{reverse}`.\n\nAmong flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to\narithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.\n\nParameters\n----------\nother : scalar, sequence, Series, dict or DataFrame\n Any single or multiple element data structure, or list-like object.\naxis : {{0 or 'index', 1 or 'columns'}}\n Whether to compare by the index (0 or 'index') or columns.\n (1 or 'columns'). 
For Series input, axis to match Series index on.\nlevel : int or label\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\nfill_value : float or None, default None\n Fill existing missing (NaN) values, and any new element needed for\n successful DataFrame alignment, with this value before computation.\n If data in both corresponding DataFrame locations is missing\n the result will be missing.\n\nReturns\n-------\nDataFrame\n Result of the arithmetic operation.\n\nSee Also\n--------\nDataFrame.add : Add DataFrames.\nDataFrame.sub : Subtract DataFrames.\nDataFrame.mul : Multiply DataFrames.\nDataFrame.div : Divide DataFrames (float division).\nDataFrame.truediv : Divide DataFrames (float division).\nDataFrame.floordiv : Divide DataFrames (integer division).\nDataFrame.mod : Calculate modulo (remainder after division).\nDataFrame.pow : Calculate exponential power.\n\nNotes\n-----\nMismatched indices will be unioned together.\n\nExamples\n--------\n>>> df = pd.DataFrame({{'angles': [0, 3, 4],\n... 'degrees': [360, 180, 360]}},\n... 
index=['circle', 'triangle', 'rectangle'])\n>>> df\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\nAdd a scalar with operator version which return the same\nresults.\n\n>>> df + 1\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\n>>> df.add(1)\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\nDivide by constant with reverse version.\n\n>>> df.div(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.rdiv(10)\n angles degrees\ncircle inf 0.027778\ntriangle 3.333333 0.055556\nrectangle 2.500000 0.027778\n\nSubtract a list and Series by axis with operator version.\n\n>>> df - [1, 2]\n angles degrees\ncircle -1 358\ntriangle 2 178\nrectangle 3 358\n\n>>> df.sub([1, 2], axis='columns')\n angles degrees\ncircle -1 358\ntriangle 2 178\nrectangle 3 358\n\n>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']),\n... axis='index')\n angles degrees\ncircle -1 359\ntriangle 2 179\nrectangle 3 359\n\nMultiply a dictionary by axis.\n\n>>> df.mul({{'angles': 0, 'degrees': 2}})\n angles degrees\ncircle 0 720\ntriangle 0 360\nrectangle 0 720\n\n>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index')\n angles degrees\ncircle 0 0\ntriangle 6 360\nrectangle 12 1080\n\nMultiply a DataFrame of different shape with operator version.\n\n>>> other = pd.DataFrame({{'angles': [0, 3, 4]}},\n... index=['circle', 'triangle', 'rectangle'])\n>>> other\n angles\ncircle 0\ntriangle 3\nrectangle 4\n\n>>> df * other\n angles degrees\ncircle 0 NaN\ntriangle 9 NaN\nrectangle 16 NaN\n\n>>> df.mul(other, fill_value=0)\n angles degrees\ncircle 0 0.0\ntriangle 9 0.0\nrectangle 16 0.0\n\nDivide by a MultiIndex by level.\n\n>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6],\n... 'degrees': [360, 180, 360, 360, 540, 720]}},\n... index=[['A', 'A', 'A', 'B', 'B', 'B'],\n... ['circle', 'triangle', 'rectangle',\n... 
'square', 'pentagon', 'hexagon']])\n>>> df_multindex\n angles degrees\nA circle 0 360\n triangle 3 180\n rectangle 4 360\nB square 4 360\n pentagon 5 540\n hexagon 6 720\n\n>>> df.div(df_multindex, level=1, fill_value=0)\n angles degrees\nA circle NaN 1.0\n triangle 1.0 1.0\n rectangle 1.0 1.0\nB square 0.0 0.0\n pentagon 0.0 0.0\n hexagon 0.0 0.0\n"""\n\n_flex_comp_doc_FRAME = """\nGet {desc} of dataframe and other, element-wise (binary operator `{op_name}`).\n\nAmong flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison\noperators.\n\nEquivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis\n(rows or columns) and level for comparison.\n\nParameters\n----------\nother : scalar, sequence, Series, or DataFrame\n Any single or multiple element data structure, or list-like object.\naxis : {{0 or 'index', 1 or 'columns'}}, default 'columns'\n Whether to compare by the index (0 or 'index') or columns\n (1 or 'columns').\nlevel : int or label\n Broadcast across a level, matching Index values on the passed\n MultiIndex level.\n\nReturns\n-------\nDataFrame of bool\n Result of the comparison.\n\nSee Also\n--------\nDataFrame.eq : Compare DataFrames for equality elementwise.\nDataFrame.ne : Compare DataFrames for inequality elementwise.\nDataFrame.le : Compare DataFrames for less than inequality\n or equality elementwise.\nDataFrame.lt : Compare DataFrames for strictly less than\n inequality elementwise.\nDataFrame.ge : Compare DataFrames for greater than inequality\n or equality elementwise.\nDataFrame.gt : Compare DataFrames for strictly greater than\n inequality elementwise.\n\nNotes\n-----\nMismatched indices will be unioned together.\n`NaN` values are considered different (i.e. `NaN` != `NaN`).\n\nExamples\n--------\n>>> df = pd.DataFrame({{'cost': [250, 150, 100],\n... 'revenue': [100, 250, 300]}},\n... 
index=['A', 'B', 'C'])\n>>> df\n cost revenue\nA 250 100\nB 150 250\nC 100 300\n\nComparison with a scalar, using either the operator or method:\n\n>>> df == 100\n cost revenue\nA False True\nB False False\nC True False\n\n>>> df.eq(100)\n cost revenue\nA False True\nB False False\nC True False\n\nWhen `other` is a :class:`Series`, the columns of a DataFrame are aligned\nwith the index of `other` and broadcast:\n\n>>> df != pd.Series([100, 250], index=["cost", "revenue"])\n cost revenue\nA True True\nB True False\nC False True\n\nUse the method to control the broadcast axis:\n\n>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index')\n cost revenue\nA True False\nB True True\nC True True\nD True True\n\nWhen comparing to an arbitrary sequence, the number of columns must\nmatch the number elements in `other`:\n\n>>> df == [250, 100]\n cost revenue\nA True True\nB False False\nC False False\n\nUse the method to control the axis:\n\n>>> df.eq([250, 250, 100], axis='index')\n cost revenue\nA True False\nB False True\nC True False\n\nCompare to a DataFrame of different shape.\n\n>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}},\n... index=['A', 'B', 'C', 'D'])\n>>> other\n revenue\nA 300\nB 250\nC 100\nD 150\n\n>>> df.gt(other)\n cost revenue\nA False False\nB False False\nC False True\nD False False\n\nCompare to a MultiIndex by level.\n\n>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220],\n... 'revenue': [100, 250, 300, 200, 175, 225]}},\n... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'],\n... 
['A', 'B', 'C', 'A', 'B', 'C']])\n>>> df_multindex\n cost revenue\nQ1 A 250 100\n B 150 250\n C 100 300\nQ2 A 150 200\n B 300 175\n C 220 225\n\n>>> df.le(df_multindex, level=1)\n cost revenue\nQ1 A True True\n B True True\n C True True\nQ2 A False True\n B True False\n C True False\n"""\n | .venv\Lib\site-packages\pandas\core\ops\docstrings.py | docstrings.py | Python | 18,448 | 0.95 | 0.033679 | 0.002907 | node-utils | 774 | 2024-06-17T23:59:12.802614 | Apache-2.0 | false | 0c7927b1c5b9640a5742c4129b1a4b3d |
"""\nTemplates for invalid operations.\n"""\nfrom __future__ import annotations\n\nimport operator\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from pandas._typing import npt\n\n\ndef invalid_comparison(left, right, op) -> npt.NDArray[np.bool_]:\n """\n If a comparison has mismatched types and is not necessarily meaningful,\n follow python3 conventions by:\n\n - returning all-False for equality\n - returning all-True for inequality\n - raising TypeError otherwise\n\n Parameters\n ----------\n left : array-like\n right : scalar, array-like\n op : operator.{eq, ne, lt, le, gt}\n\n Raises\n ------\n TypeError : on inequality comparisons\n """\n if op is operator.eq:\n res_values = np.zeros(left.shape, dtype=bool)\n elif op is operator.ne:\n res_values = np.ones(left.shape, dtype=bool)\n else:\n typ = type(right).__name__\n raise TypeError(f"Invalid comparison between dtype={left.dtype} and {typ}")\n return res_values\n\n\ndef make_invalid_op(name: str):\n """\n Return a binary method that always raises a TypeError.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n invalid_op : function\n """\n\n def invalid_op(self, other=None):\n typ = type(self).__name__\n raise TypeError(f"cannot perform {name} with this index type: {typ}")\n\n invalid_op.__name__ = name\n return invalid_op\n | .venv\Lib\site-packages\pandas\core\ops\invalid.py | invalid.py | Python | 1,433 | 0.85 | 0.145161 | 0 | vue-tools | 862 | 2025-04-24T13:05:57.072239 | MIT | false | d8138ad891a372e739e62b4bdb0d38fb |
"""\nOps for masked arrays.\n"""\nfrom __future__ import annotations\n\nimport numpy as np\n\nfrom pandas._libs import (\n lib,\n missing as libmissing,\n)\n\n\ndef kleene_or(\n left: bool | np.ndarray | libmissing.NAType,\n right: bool | np.ndarray | libmissing.NAType,\n left_mask: np.ndarray | None,\n right_mask: np.ndarray | None,\n):\n """\n Boolean ``or`` using Kleene logic.\n\n Values are NA where we have ``NA | NA`` or ``NA | False``.\n ``NA | True`` is considered True.\n\n Parameters\n ----------\n left, right : ndarray, NA, or bool\n The values of the array.\n left_mask, right_mask : ndarray, optional\n The masks. Only one of these may be None, which implies that\n the associated `left` or `right` value is a scalar.\n\n Returns\n -------\n result, mask: ndarray[bool]\n The result of the logical or, and the new mask.\n """\n # To reduce the number of cases, we ensure that `left` & `left_mask`\n # always come from an array, not a scalar. This is safe, since\n # A | B == B | A\n if left_mask is None:\n return kleene_or(right, left, right_mask, left_mask)\n\n if not isinstance(left, np.ndarray):\n raise TypeError("Either `left` or `right` need to be a np.ndarray.")\n\n raise_for_nan(right, method="or")\n\n if right is libmissing.NA:\n result = left.copy()\n else:\n result = left | right\n\n if right_mask is not None:\n # output is unknown where (False & NA), (NA & False), (NA & NA)\n left_false = ~(left | left_mask)\n right_false = ~(right | right_mask)\n mask = (\n (left_false & right_mask)\n | (right_false & left_mask)\n | (left_mask & right_mask)\n )\n else:\n if right is True:\n mask = np.zeros_like(left_mask)\n elif right is libmissing.NA:\n mask = (~left & ~left_mask) | left_mask\n else:\n # False\n mask = left_mask.copy()\n\n return result, mask\n\n\ndef kleene_xor(\n left: bool | np.ndarray | libmissing.NAType,\n right: bool | np.ndarray | libmissing.NAType,\n left_mask: np.ndarray | None,\n right_mask: np.ndarray | None,\n):\n """\n Boolean ``xor`` 
using Kleene logic.\n\n This is the same as ``or``, with the following adjustments\n\n * True, True -> False\n * True, NA -> NA\n\n Parameters\n ----------\n left, right : ndarray, NA, or bool\n The values of the array.\n left_mask, right_mask : ndarray, optional\n The masks. Only one of these may be None, which implies that\n the associated `left` or `right` value is a scalar.\n\n Returns\n -------\n result, mask: ndarray[bool]\n The result of the logical xor, and the new mask.\n """\n # To reduce the number of cases, we ensure that `left` & `left_mask`\n # always come from an array, not a scalar. This is safe, since\n # A ^ B == B ^ A\n if left_mask is None:\n return kleene_xor(right, left, right_mask, left_mask)\n\n if not isinstance(left, np.ndarray):\n raise TypeError("Either `left` or `right` need to be a np.ndarray.")\n\n raise_for_nan(right, method="xor")\n if right is libmissing.NA:\n result = np.zeros_like(left)\n else:\n result = left ^ right\n\n if right_mask is None:\n if right is libmissing.NA:\n mask = np.ones_like(left_mask)\n else:\n mask = left_mask.copy()\n else:\n mask = left_mask | right_mask\n\n return result, mask\n\n\ndef kleene_and(\n left: bool | libmissing.NAType | np.ndarray,\n right: bool | libmissing.NAType | np.ndarray,\n left_mask: np.ndarray | None,\n right_mask: np.ndarray | None,\n):\n """\n Boolean ``and`` using Kleene logic.\n\n Values are ``NA`` for ``NA & NA`` or ``True & NA``.\n\n Parameters\n ----------\n left, right : ndarray, NA, or bool\n The values of the array.\n left_mask, right_mask : ndarray, optional\n The masks. Only one of these may be None, which implies that\n the associated `left` or `right` value is a scalar.\n\n Returns\n -------\n result, mask: ndarray[bool]\n The result of the logical xor, and the new mask.\n """\n # To reduce the number of cases, we ensure that `left` & `left_mask`\n # always come from an array, not a scalar. 
This is safe, since\n # A & B == B & A\n if left_mask is None:\n return kleene_and(right, left, right_mask, left_mask)\n\n if not isinstance(left, np.ndarray):\n raise TypeError("Either `left` or `right` need to be a np.ndarray.")\n raise_for_nan(right, method="and")\n\n if right is libmissing.NA:\n result = np.zeros_like(left)\n else:\n result = left & right\n\n if right_mask is None:\n # Scalar `right`\n if right is libmissing.NA:\n mask = (left & ~left_mask) | left_mask\n\n else:\n mask = left_mask.copy()\n if right is False:\n # unmask everything\n mask[:] = False\n else:\n # unmask where either left or right is False\n left_false = ~(left | left_mask)\n right_false = ~(right | right_mask)\n mask = (left_mask & ~right_false) | (right_mask & ~left_false)\n\n return result, mask\n\n\ndef raise_for_nan(value, method: str) -> None:\n if lib.is_float(value) and np.isnan(value):\n raise ValueError(f"Cannot perform logical '{method}' with floating NaN")\n | .venv\Lib\site-packages\pandas\core\ops\mask_ops.py | mask_ops.py | Python | 5,409 | 0.95 | 0.121693 | 0.103226 | vue-tools | 540 | 2023-08-12T01:06:40.962427 | MIT | false | 2bc1f45f9383fa325b9b67f656aa1dc9 |
"""\nMissing data handling for arithmetic operations.\n\nIn particular, pandas conventions regarding division by zero differ\nfrom numpy in the following ways:\n 1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2)\n gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for\n the remaining pairs\n (the remaining being dtype1==dtype2==intN and dtype==dtype2==uintN).\n\n pandas convention is to return [-inf, nan, inf] for all dtype\n combinations.\n\n Note: the numpy behavior described here is py3-specific.\n\n 2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2)\n gives precisely the same results as the // operation.\n\n pandas convention is to return [nan, nan, nan] for all dtype\n combinations.\n\n 3) divmod behavior consistent with 1) and 2).\n"""\nfrom __future__ import annotations\n\nimport operator\n\nimport numpy as np\n\nfrom pandas.core import roperator\n\n\ndef _fill_zeros(result: np.ndarray, x, y):\n """\n If this is a reversed op, then flip x,y\n\n If we have an integer value (or array in y)\n and we have 0's, fill them with np.nan,\n return the result.\n\n Mask the nan's from x.\n """\n if result.dtype.kind == "f":\n return result\n\n is_variable_type = hasattr(y, "dtype")\n is_scalar_type = not isinstance(y, np.ndarray)\n\n if not is_variable_type and not is_scalar_type:\n # e.g. 
test_series_ops_name_retention with mod we get here with list/tuple\n return result\n\n if is_scalar_type:\n y = np.array(y)\n\n if y.dtype.kind in "iu":\n ymask = y == 0\n if ymask.any():\n # GH#7325, mask and nans must be broadcastable\n mask = ymask & ~np.isnan(result)\n\n # GH#9308 doing ravel on result and mask can improve putmask perf,\n # but can also make unwanted copies.\n result = result.astype("float64", copy=False)\n\n np.putmask(result, mask, np.nan)\n\n return result\n\n\ndef mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:\n """\n Set results of 0 // 0 to np.nan, regardless of the dtypes\n of the numerator or the denominator.\n\n Parameters\n ----------\n x : ndarray\n y : ndarray\n result : ndarray\n\n Returns\n -------\n ndarray\n The filled result.\n\n Examples\n --------\n >>> x = np.array([1, 0, -1], dtype=np.int64)\n >>> x\n array([ 1, 0, -1])\n >>> y = 0 # int 0; numpy behavior is different with float\n >>> result = x // y\n >>> result # raw numpy result does not fill division by zero\n array([0, 0, 0])\n >>> mask_zero_div_zero(x, y, result)\n array([ inf, nan, -inf])\n """\n\n if not hasattr(y, "dtype"):\n # e.g. 
scalar, tuple\n y = np.array(y)\n if not hasattr(x, "dtype"):\n # e.g scalar, tuple\n x = np.array(x)\n\n zmask = y == 0\n\n if zmask.any():\n # Flip sign if necessary for -0.0\n zneg_mask = zmask & np.signbit(y)\n zpos_mask = zmask & ~zneg_mask\n\n x_lt0 = x < 0\n x_gt0 = x > 0\n nan_mask = zmask & (x == 0)\n neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)\n posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)\n\n if nan_mask.any() or neginf_mask.any() or posinf_mask.any():\n # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN\n result = result.astype("float64", copy=False)\n\n result[nan_mask] = np.nan\n result[posinf_mask] = np.inf\n result[neginf_mask] = -np.inf\n\n return result\n\n\ndef dispatch_fill_zeros(op, left, right, result):\n """\n Call _fill_zeros with the appropriate fill value depending on the operation,\n with special logic for divmod and rdivmod.\n\n Parameters\n ----------\n op : function (operator.add, operator.div, ...)\n left : object (np.ndarray for non-reversed ops)\n We have excluded ExtensionArrays here\n right : object (np.ndarray for reversed ops)\n We have excluded ExtensionArrays here\n result : ndarray\n\n Returns\n -------\n result : np.ndarray\n\n Notes\n -----\n For divmod and rdivmod, the `result` parameter and returned `result`\n is a 2-tuple of ndarray objects.\n """\n if op is divmod:\n result = (\n mask_zero_div_zero(left, right, result[0]),\n _fill_zeros(result[1], left, right),\n )\n elif op is roperator.rdivmod:\n result = (\n mask_zero_div_zero(right, left, result[0]),\n _fill_zeros(result[1], right, left),\n )\n elif op is operator.floordiv:\n # Note: no need to do this for truediv; in py3 numpy behaves the way\n # we want.\n result = mask_zero_div_zero(left, right, result)\n elif op is roperator.rfloordiv:\n # Note: no need to do this for rtruediv; in py3 numpy behaves the way\n # we want.\n result = mask_zero_div_zero(right, left, result)\n elif op is operator.mod:\n result = _fill_zeros(result, 
left, right)\n elif op is roperator.rmod:\n result = _fill_zeros(result, right, left)\n return result\n | .venv\Lib\site-packages\pandas\core\ops\missing.py | missing.py | Python | 5,140 | 0.95 | 0.147727 | 0.086331 | node-utils | 449 | 2025-01-24T22:49:20.424232 | MIT | false | 1e497dc79a960e499847bce4fc47e80a |
"""\nArithmetic operations for PandasObjects\n\nThis is not a public API.\n"""\nfrom __future__ import annotations\n\nfrom pandas.core.ops.array_ops import (\n arithmetic_op,\n comp_method_OBJECT_ARRAY,\n comparison_op,\n fill_binop,\n get_array_op,\n logical_op,\n maybe_prepare_scalar_for_op,\n)\nfrom pandas.core.ops.common import (\n get_op_result_name,\n unpack_zerodim_and_defer,\n)\nfrom pandas.core.ops.docstrings import make_flex_doc\nfrom pandas.core.ops.invalid import invalid_comparison\nfrom pandas.core.ops.mask_ops import (\n kleene_and,\n kleene_or,\n kleene_xor,\n)\nfrom pandas.core.roperator import (\n radd,\n rand_,\n rdiv,\n rdivmod,\n rfloordiv,\n rmod,\n rmul,\n ror_,\n rpow,\n rsub,\n rtruediv,\n rxor,\n)\n\n# -----------------------------------------------------------------------------\n# constants\nARITHMETIC_BINOPS: set[str] = {\n "add",\n "sub",\n "mul",\n "pow",\n "mod",\n "floordiv",\n "truediv",\n "divmod",\n "radd",\n "rsub",\n "rmul",\n "rpow",\n "rmod",\n "rfloordiv",\n "rtruediv",\n "rdivmod",\n}\n\n\n__all__ = [\n "ARITHMETIC_BINOPS",\n "arithmetic_op",\n "comparison_op",\n "comp_method_OBJECT_ARRAY",\n "invalid_comparison",\n "fill_binop",\n "kleene_and",\n "kleene_or",\n "kleene_xor",\n "logical_op",\n "make_flex_doc",\n "radd",\n "rand_",\n "rdiv",\n "rdivmod",\n "rfloordiv",\n "rmod",\n "rmul",\n "ror_",\n "rpow",\n "rsub",\n "rtruediv",\n "rxor",\n "unpack_zerodim_and_defer",\n "get_op_result_name",\n "maybe_prepare_scalar_for_op",\n "get_array_op",\n]\n | .venv\Lib\site-packages\pandas\core\ops\__init__.py | __init__.py | Python | 1,620 | 0.95 | 0.010753 | 0.022727 | node-utils | 8 | 2023-10-28T21:16:35.584603 | MIT | false | 3338728ad2f7e0840b42a0208c12caf9 |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\array_ops.cpython-313.pyc | array_ops.cpython-313.pyc | Other | 18,597 | 0.95 | 0.04 | 0 | vue-tools | 62 | 2023-09-06T22:23:40.544904 | MIT | false | ab49e51dc447bad74a8b4d759ea184dd |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\common.cpython-313.pyc | common.cpython-313.pyc | Other | 4,264 | 0.95 | 0.043956 | 0 | awesome-app | 426 | 2025-03-24T17:30:06.775863 | MIT | false | 3f29d2ac121e688decbdb2f149cee35a |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\dispatch.cpython-313.pyc | dispatch.cpython-313.pyc | Other | 968 | 0.8 | 0.041667 | 0 | react-lib | 264 | 2024-07-15T05:49:41.845929 | BSD-3-Clause | false | 60f78f3f0391f4b9afe208b21ee84927 |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\docstrings.cpython-313.pyc | docstrings.cpython-313.pyc | Other | 16,393 | 0.8 | 0.030521 | 0 | react-lib | 284 | 2023-12-17T20:52:51.469831 | Apache-2.0 | false | 8943334d27527d9683f52311d57ec3f4 |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\invalid.cpython-313.pyc | invalid.cpython-313.pyc | Other | 2,214 | 0.95 | 0.090909 | 0 | react-lib | 418 | 2025-01-07T18:37:46.807011 | GPL-3.0 | false | 33b1107ca758e0ccc46609e7d4b3b6fc |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\mask_ops.cpython-313.pyc | mask_ops.cpython-313.pyc | Other | 5,161 | 0.8 | 0.019231 | 0.034884 | node-utils | 621 | 2024-11-27T02:38:00.382962 | BSD-3-Clause | false | ea975d014b31122972bda224dcada165 |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\missing.cpython-313.pyc | missing.cpython-313.pyc | Other | 5,513 | 0.95 | 0.086538 | 0 | awesome-app | 52 | 2023-09-24T21:59:02.800491 | BSD-3-Clause | false | 2ce3713dc2c95717b5ce658ceeb449c4 |
\n\n | .venv\Lib\site-packages\pandas\core\ops\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,502 | 0.8 | 0.033333 | 0 | awesome-app | 356 | 2024-03-02T05:03:38.439433 | Apache-2.0 | false | db0133d587b3770d0853d38bd7b5e810 |
from pandas.core.reshape.concat import concat\nfrom pandas.core.reshape.encoding import (\n from_dummies,\n get_dummies,\n)\nfrom pandas.core.reshape.melt import (\n lreshape,\n melt,\n wide_to_long,\n)\nfrom pandas.core.reshape.merge import (\n merge,\n merge_asof,\n merge_ordered,\n)\nfrom pandas.core.reshape.pivot import (\n crosstab,\n pivot,\n pivot_table,\n)\nfrom pandas.core.reshape.tile import (\n cut,\n qcut,\n)\n\n__all__ = [\n "concat",\n "crosstab",\n "cut",\n "from_dummies",\n "get_dummies",\n "lreshape",\n "melt",\n "merge",\n "merge_asof",\n "merge_ordered",\n "pivot",\n "pivot_table",\n "qcut",\n "wide_to_long",\n]\n | .venv\Lib\site-packages\pandas\core\reshape\api.py | api.py | Python | 680 | 0.85 | 0 | 0 | react-lib | 96 | 2024-03-23T19:24:59.332736 | Apache-2.0 | false | f860d0b327bd42be481b804564356b97 |
"""\nConcat routines.\n"""\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Literal,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import using_copy_on_write\n\nfrom pandas.util._decorators import cache_readonly\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_bool,\n is_iterator,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.arrays.categorical import (\n factorize_from_iterable,\n factorize_from_iterables,\n)\nimport pandas.core.common as com\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n all_indexes_same,\n default_index,\n ensure_index,\n get_objs_combined_axis,\n get_unanimous_names,\n)\nfrom pandas.core.internals import concatenate_managers\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterable,\n Mapping,\n )\n\n from pandas._typing import (\n Axis,\n AxisInt,\n HashableT,\n )\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n# ---------------------------------------------------------------------\n# Concatenate DataFrame objects\n\n\n@overload\ndef concat(\n objs: Iterable[DataFrame] | Mapping[HashableT, DataFrame],\n *,\n axis: Literal[0, "index"] = ...,\n join: str = ...,\n ignore_index: bool = ...,\n keys: Iterable[Hashable] | None = ...,\n levels=...,\n names: list[HashableT] | None = ...,\n verify_integrity: bool = ...,\n sort: bool = ...,\n copy: bool | None = ...,\n) -> DataFrame:\n ...\n\n\n@overload\ndef concat(\n objs: Iterable[Series] | Mapping[HashableT, Series],\n *,\n axis: Literal[0, "index"] = ...,\n join: str = ...,\n ignore_index: bool = ...,\n keys: Iterable[Hashable] | None = ...,\n levels=...,\n names: list[HashableT] | None = ...,\n verify_integrity: bool = ...,\n sort: bool = ...,\n copy: 
bool | None = ...,\n) -> Series:\n ...\n\n\n@overload\ndef concat(\n objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame],\n *,\n axis: Literal[0, "index"] = ...,\n join: str = ...,\n ignore_index: bool = ...,\n keys: Iterable[Hashable] | None = ...,\n levels=...,\n names: list[HashableT] | None = ...,\n verify_integrity: bool = ...,\n sort: bool = ...,\n copy: bool | None = ...,\n) -> DataFrame | Series:\n ...\n\n\n@overload\ndef concat(\n objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame],\n *,\n axis: Literal[1, "columns"],\n join: str = ...,\n ignore_index: bool = ...,\n keys: Iterable[Hashable] | None = ...,\n levels=...,\n names: list[HashableT] | None = ...,\n verify_integrity: bool = ...,\n sort: bool = ...,\n copy: bool | None = ...,\n) -> DataFrame:\n ...\n\n\n@overload\ndef concat(\n objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame],\n *,\n axis: Axis = ...,\n join: str = ...,\n ignore_index: bool = ...,\n keys: Iterable[Hashable] | None = ...,\n levels=...,\n names: list[HashableT] | None = ...,\n verify_integrity: bool = ...,\n sort: bool = ...,\n copy: bool | None = ...,\n) -> DataFrame | Series:\n ...\n\n\ndef concat(\n objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame],\n *,\n axis: Axis = 0,\n join: str = "outer",\n ignore_index: bool = False,\n keys: Iterable[Hashable] | None = None,\n levels=None,\n names: list[HashableT] | None = None,\n verify_integrity: bool = False,\n sort: bool = False,\n copy: bool | None = None,\n) -> DataFrame | Series:\n """\n Concatenate pandas objects along a particular axis.\n\n Allows optional set logic along the other axes.\n\n Can also add a layer of hierarchical indexing on the concatenation axis,\n which may be useful if the labels are the same (or overlapping) on\n the passed axis number.\n\n Parameters\n ----------\n objs : a sequence or mapping of Series or DataFrame objects\n If a mapping is passed, the sorted 
keys will be used as the `keys`\n argument, unless it is passed, in which case the values will be\n selected (see below). Any None objects will be dropped silently unless\n they are all None in which case a ValueError will be raised.\n axis : {0/'index', 1/'columns'}, default 0\n The axis to concatenate along.\n join : {'inner', 'outer'}, default 'outer'\n How to handle indexes on other axis (or axes).\n ignore_index : bool, default False\n If True, do not use the index values along the concatenation axis. The\n resulting axis will be labeled 0, ..., n - 1. This is useful if you are\n concatenating objects where the concatenation axis does not have\n meaningful indexing information. Note the index values on the other\n axes are still respected in the join.\n keys : sequence, default None\n If multiple levels passed, should contain tuples. Construct\n hierarchical index using the passed keys as the outermost level.\n levels : list of sequences, default None\n Specific levels (unique values) to use for constructing a\n MultiIndex. Otherwise they will be inferred from the keys.\n names : list, default None\n Names for the levels in the resulting hierarchical index.\n verify_integrity : bool, default False\n Check whether the new concatenated axis contains duplicates. This can\n be very expensive relative to the actual data concatenation.\n sort : bool, default False\n Sort non-concatenation axis if it is not already aligned. One exception to\n this is when the non-concatentation axis is a DatetimeIndex and join='outer'\n and the axis is not already aligned. In that case, the non-concatenation\n axis is always sorted lexicographically.\n copy : bool, default True\n If False, do not copy data unnecessarily.\n\n Returns\n -------\n object, type of objs\n When concatenating all ``Series`` along the index (axis=0), a\n ``Series`` is returned. When ``objs`` contains at least one\n ``DataFrame``, a ``DataFrame`` is returned. 
When concatenating along\n the columns (axis=1), a ``DataFrame`` is returned.\n\n See Also\n --------\n DataFrame.join : Join DataFrames using indexes.\n DataFrame.merge : Merge DataFrames by indexes or columns.\n\n Notes\n -----\n The keys, levels, and names arguments are all optional.\n\n A walkthrough of how this method fits in with other tools for combining\n pandas objects can be found `here\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__.\n\n It is not recommended to build DataFrames by adding single rows in a\n for loop. Build a list of rows and make a DataFrame in a single concat.\n\n Examples\n --------\n Combine two ``Series``.\n\n >>> s1 = pd.Series(['a', 'b'])\n >>> s2 = pd.Series(['c', 'd'])\n >>> pd.concat([s1, s2])\n 0 a\n 1 b\n 0 c\n 1 d\n dtype: object\n\n Clear the existing index and reset it in the result\n by setting the ``ignore_index`` option to ``True``.\n\n >>> pd.concat([s1, s2], ignore_index=True)\n 0 a\n 1 b\n 2 c\n 3 d\n dtype: object\n\n Add a hierarchical index at the outermost level of\n the data with the ``keys`` option.\n\n >>> pd.concat([s1, s2], keys=['s1', 's2'])\n s1 0 a\n 1 b\n s2 0 c\n 1 d\n dtype: object\n\n Label the index keys you create with the ``names`` option.\n\n >>> pd.concat([s1, s2], keys=['s1', 's2'],\n ... names=['Series name', 'Row ID'])\n Series name Row ID\n s1 0 a\n 1 b\n s2 0 c\n 1 d\n dtype: object\n\n Combine two ``DataFrame`` objects with identical columns.\n\n >>> df1 = pd.DataFrame([['a', 1], ['b', 2]],\n ... columns=['letter', 'number'])\n >>> df1\n letter number\n 0 a 1\n 1 b 2\n >>> df2 = pd.DataFrame([['c', 3], ['d', 4]],\n ... columns=['letter', 'number'])\n >>> df2\n letter number\n 0 c 3\n 1 d 4\n >>> pd.concat([df1, df2])\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n\n Combine ``DataFrame`` objects with overlapping columns\n and return everything. 
Columns outside the intersection will\n be filled with ``NaN`` values.\n\n >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],\n ... columns=['letter', 'number', 'animal'])\n >>> df3\n letter number animal\n 0 c 3 cat\n 1 d 4 dog\n >>> pd.concat([df1, df3], sort=False)\n letter number animal\n 0 a 1 NaN\n 1 b 2 NaN\n 0 c 3 cat\n 1 d 4 dog\n\n Combine ``DataFrame`` objects with overlapping columns\n and return only those that are shared by passing ``inner`` to\n the ``join`` keyword argument.\n\n >>> pd.concat([df1, df3], join="inner")\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n\n Combine ``DataFrame`` objects horizontally along the x axis by\n passing in ``axis=1``.\n\n >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],\n ... columns=['animal', 'name'])\n >>> pd.concat([df1, df4], axis=1)\n letter number animal name\n 0 a 1 bird polly\n 1 b 2 monkey george\n\n Prevent the result from including duplicate index values with the\n ``verify_integrity`` option.\n\n >>> df5 = pd.DataFrame([1], index=['a'])\n >>> df5\n 0\n a 1\n >>> df6 = pd.DataFrame([2], index=['a'])\n >>> df6\n 0\n a 2\n >>> pd.concat([df5, df6], verify_integrity=True)\n Traceback (most recent call last):\n ...\n ValueError: Indexes have overlapping values: ['a']\n\n Append a single row to the end of a ``DataFrame`` object.\n\n >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0])\n >>> df7\n a b\n 0 1 2\n >>> new_row = pd.Series({'a': 3, 'b': 4})\n >>> new_row\n a 3\n b 4\n dtype: int64\n >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True)\n a b\n 0 1 2\n 1 3 4\n """\n if copy is None:\n if using_copy_on_write():\n copy = False\n else:\n copy = True\n elif copy and using_copy_on_write():\n copy = False\n\n op = _Concatenator(\n objs,\n axis=axis,\n ignore_index=ignore_index,\n join=join,\n keys=keys,\n levels=levels,\n names=names,\n verify_integrity=verify_integrity,\n copy=copy,\n sort=sort,\n )\n\n return op.get_result()\n\n\nclass _Concatenator:\n """\n 
Orchestrates a concatenation operation for BlockManagers\n """\n\n sort: bool\n\n def __init__(\n self,\n objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame],\n axis: Axis = 0,\n join: str = "outer",\n keys: Iterable[Hashable] | None = None,\n levels=None,\n names: list[HashableT] | None = None,\n ignore_index: bool = False,\n verify_integrity: bool = False,\n copy: bool = True,\n sort: bool = False,\n ) -> None:\n if isinstance(objs, (ABCSeries, ABCDataFrame, str)):\n raise TypeError(\n "first argument must be an iterable of pandas "\n f'objects, you passed an object of type "{type(objs).__name__}"'\n )\n\n if join == "outer":\n self.intersect = False\n elif join == "inner":\n self.intersect = True\n else: # pragma: no cover\n raise ValueError(\n "Only can inner (intersect) or outer (union) join the other axis"\n )\n\n if not is_bool(sort):\n raise ValueError(\n f"The 'sort' keyword only accepts boolean values; {sort} was passed."\n )\n # Incompatible types in assignment (expression has type "Union[bool, bool_]",\n # variable has type "bool")\n self.sort = sort # type: ignore[assignment]\n\n self.ignore_index = ignore_index\n self.verify_integrity = verify_integrity\n self.copy = copy\n\n objs, keys = self._clean_keys_and_objs(objs, keys)\n\n # figure out what our result ndim is going to be\n ndims = self._get_ndims(objs)\n sample, objs = self._get_sample_object(objs, ndims, keys, names, levels)\n\n # Standardize axis parameter to int\n if sample.ndim == 1:\n from pandas import DataFrame\n\n axis = DataFrame._get_axis_number(axis)\n self._is_frame = False\n self._is_series = True\n else:\n axis = sample._get_axis_number(axis)\n self._is_frame = True\n self._is_series = False\n\n # Need to flip BlockManager axis in the DataFrame special case\n axis = sample._get_block_manager_axis(axis)\n\n # if we have mixed ndims, then convert to highest ndim\n # creating column numbers as needed\n if len(ndims) > 1:\n objs = 
self._sanitize_mixed_ndim(objs, sample, ignore_index, axis)\n\n self.objs = objs\n\n # note: this is the BlockManager axis (since DataFrame is transposed)\n self.bm_axis = axis\n self.axis = 1 - self.bm_axis if self._is_frame else 0\n self.keys = keys\n self.names = names or getattr(keys, "names", None)\n self.levels = levels\n\n def _get_ndims(self, objs: list[Series | DataFrame]) -> set[int]:\n # figure out what our result ndim is going to be\n ndims = set()\n for obj in objs:\n if not isinstance(obj, (ABCSeries, ABCDataFrame)):\n msg = (\n f"cannot concatenate object of type '{type(obj)}'; "\n "only Series and DataFrame objs are valid"\n )\n raise TypeError(msg)\n\n ndims.add(obj.ndim)\n return ndims\n\n def _clean_keys_and_objs(\n self,\n objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame],\n keys,\n ) -> tuple[list[Series | DataFrame], Index | None]:\n if isinstance(objs, abc.Mapping):\n if keys is None:\n keys = list(objs.keys())\n objs_list = [objs[k] for k in keys]\n else:\n objs_list = list(objs)\n\n if len(objs_list) == 0:\n raise ValueError("No objects to concatenate")\n\n if keys is None:\n objs_list = list(com.not_none(*objs_list))\n else:\n # GH#1649\n clean_keys = []\n clean_objs = []\n if is_iterator(keys):\n keys = list(keys)\n if len(keys) != len(objs_list):\n # GH#43485\n warnings.warn(\n "The behavior of pd.concat with len(keys) != len(objs) is "\n "deprecated. 
In a future version this will raise instead of "\n "truncating to the smaller of the two sequences",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n for k, v in zip(keys, objs_list):\n if v is None:\n continue\n clean_keys.append(k)\n clean_objs.append(v)\n objs_list = clean_objs\n\n if isinstance(keys, MultiIndex):\n # TODO: retain levels?\n keys = type(keys).from_tuples(clean_keys, names=keys.names)\n else:\n name = getattr(keys, "name", None)\n keys = Index(clean_keys, name=name, dtype=getattr(keys, "dtype", None))\n\n if len(objs_list) == 0:\n raise ValueError("All objects passed were None")\n\n return objs_list, keys\n\n def _get_sample_object(\n self,\n objs: list[Series | DataFrame],\n ndims: set[int],\n keys,\n names,\n levels,\n ) -> tuple[Series | DataFrame, list[Series | DataFrame]]:\n # get the sample\n # want the highest ndim that we have, and must be non-empty\n # unless all objs are empty\n sample: Series | DataFrame | None = None\n if len(ndims) > 1:\n max_ndim = max(ndims)\n for obj in objs:\n if obj.ndim == max_ndim and np.sum(obj.shape):\n sample = obj\n break\n\n else:\n # filter out the empties if we have not multi-index possibilities\n # note to keep empty Series as it affect to result columns / name\n non_empties = [obj for obj in objs if sum(obj.shape) > 0 or obj.ndim == 1]\n\n if len(non_empties) and (\n keys is None and names is None and levels is None and not self.intersect\n ):\n objs = non_empties\n sample = objs[0]\n\n if sample is None:\n sample = objs[0]\n return sample, objs\n\n def _sanitize_mixed_ndim(\n self,\n objs: list[Series | DataFrame],\n sample: Series | DataFrame,\n ignore_index: bool,\n axis: AxisInt,\n ) -> list[Series | DataFrame]:\n # if we have mixed ndims, then convert to highest ndim\n # creating column numbers as needed\n\n new_objs = []\n\n current_column = 0\n max_ndim = sample.ndim\n for obj in objs:\n ndim = obj.ndim\n if ndim == max_ndim:\n pass\n\n elif ndim != max_ndim - 1:\n raise ValueError(\n 
"cannot concatenate unaligned mixed dimensional NDFrame objects"\n )\n\n else:\n name = getattr(obj, "name", None)\n if ignore_index or name is None:\n if axis == 1:\n # doing a row-wise concatenation so need everything\n # to line up\n name = 0\n else:\n # doing a column-wise concatenation so need series\n # to have unique names\n name = current_column\n current_column += 1\n\n obj = sample._constructor({name: obj}, copy=False)\n\n new_objs.append(obj)\n\n return new_objs\n\n def get_result(self):\n cons: Callable[..., DataFrame | Series]\n sample: DataFrame | Series\n\n # series only\n if self._is_series:\n sample = cast("Series", self.objs[0])\n\n # stack blocks\n if self.bm_axis == 0:\n name = com.consensus_name_attr(self.objs)\n cons = sample._constructor\n\n arrs = [ser._values for ser in self.objs]\n\n res = concat_compat(arrs, axis=0)\n\n new_index: Index\n if self.ignore_index:\n # We can avoid surprisingly-expensive _get_concat_axis\n new_index = default_index(len(res))\n else:\n new_index = self.new_axes[0]\n\n mgr = type(sample._mgr).from_array(res, index=new_index)\n\n result = sample._constructor_from_mgr(mgr, axes=mgr.axes)\n result._name = name\n return result.__finalize__(self, method="concat")\n\n # combine as columns in a frame\n else:\n data = dict(zip(range(len(self.objs)), self.objs))\n\n # GH28330 Preserves subclassed objects through concat\n cons = sample._constructor_expanddim\n\n index, columns = self.new_axes\n df = cons(data, index=index, copy=self.copy)\n df.columns = columns\n return df.__finalize__(self, method="concat")\n\n # combine block managers\n else:\n sample = cast("DataFrame", self.objs[0])\n\n mgrs_indexers = []\n for obj in self.objs:\n indexers = {}\n for ax, new_labels in enumerate(self.new_axes):\n # ::-1 to convert BlockManager ax to DataFrame ax\n if ax == self.bm_axis:\n # Suppress reindexing on concat axis\n continue\n\n # 1-ax to convert BlockManager axis to DataFrame axis\n obj_labels = obj.axes[1 - ax]\n if not 
new_labels.equals(obj_labels):\n indexers[ax] = obj_labels.get_indexer(new_labels)\n\n mgrs_indexers.append((obj._mgr, indexers))\n\n new_data = concatenate_managers(\n mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy\n )\n if not self.copy and not using_copy_on_write():\n new_data._consolidate_inplace()\n\n out = sample._constructor_from_mgr(new_data, axes=new_data.axes)\n return out.__finalize__(self, method="concat")\n\n def _get_result_dim(self) -> int:\n if self._is_series and self.bm_axis == 1:\n return 2\n else:\n return self.objs[0].ndim\n\n @cache_readonly\n def new_axes(self) -> list[Index]:\n ndim = self._get_result_dim()\n return [\n self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i)\n for i in range(ndim)\n ]\n\n def _get_comb_axis(self, i: AxisInt) -> Index:\n data_axis = self.objs[0]._get_block_manager_axis(i)\n return get_objs_combined_axis(\n self.objs,\n axis=data_axis,\n intersect=self.intersect,\n sort=self.sort,\n copy=self.copy,\n )\n\n @cache_readonly\n def _get_concat_axis(self) -> Index:\n """\n Return index to be used along concatenation axis.\n """\n if self._is_series:\n if self.bm_axis == 0:\n indexes = [x.index for x in self.objs]\n elif self.ignore_index:\n idx = default_index(len(self.objs))\n return idx\n elif self.keys is None:\n names: list[Hashable] = [None] * len(self.objs)\n num = 0\n has_names = False\n for i, x in enumerate(self.objs):\n if x.ndim != 1:\n raise TypeError(\n f"Cannot concatenate type 'Series' with "\n f"object of type '{type(x).__name__}'"\n )\n if x.name is not None:\n names[i] = x.name\n has_names = True\n else:\n names[i] = num\n num += 1\n if has_names:\n return Index(names)\n else:\n return default_index(len(self.objs))\n else:\n return ensure_index(self.keys).set_names(self.names)\n else:\n indexes = [x.axes[self.axis] for x in self.objs]\n\n if self.ignore_index:\n idx = default_index(sum(len(i) for i in indexes))\n return idx\n\n if self.keys is None:\n if 
self.levels is not None:\n raise ValueError("levels supported only when keys is not None")\n concat_axis = _concat_indexes(indexes)\n else:\n concat_axis = _make_concat_multiindex(\n indexes, self.keys, self.levels, self.names\n )\n\n self._maybe_check_integrity(concat_axis)\n\n return concat_axis\n\n def _maybe_check_integrity(self, concat_index: Index):\n if self.verify_integrity:\n if not concat_index.is_unique:\n overlap = concat_index[concat_index.duplicated()].unique()\n raise ValueError(f"Indexes have overlapping values: {overlap}")\n\n\ndef _concat_indexes(indexes) -> Index:\n return indexes[0].append(indexes[1:])\n\n\ndef _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex:\n if (levels is None and isinstance(keys[0], tuple)) or (\n levels is not None and len(levels) > 1\n ):\n zipped = list(zip(*keys))\n if names is None:\n names = [None] * len(zipped)\n\n if levels is None:\n _, levels = factorize_from_iterables(zipped)\n else:\n levels = [ensure_index(x) for x in levels]\n else:\n zipped = [keys]\n if names is None:\n names = [None]\n\n if levels is None:\n levels = [ensure_index(keys).unique()]\n else:\n levels = [ensure_index(x) for x in levels]\n\n for level in levels:\n if not level.is_unique:\n raise ValueError(f"Level values not unique: {level.tolist()}")\n\n if not all_indexes_same(indexes) or not all(level.is_unique for level in levels):\n codes_list = []\n\n # things are potentially different sizes, so compute the exact codes\n # for each level and pass those to MultiIndex.from_arrays\n\n for hlevel, level in zip(zipped, levels):\n to_concat = []\n if isinstance(hlevel, Index) and hlevel.equals(level):\n lens = [len(idx) for idx in indexes]\n codes_list.append(np.repeat(np.arange(len(hlevel)), lens))\n else:\n for key, index in zip(hlevel, indexes):\n # Find matching codes, include matching nan values as equal.\n mask = (isna(level) & isna(key)) | (level == key)\n if not mask.any():\n raise ValueError(f"Key {key} not 
in level {level}")\n i = np.nonzero(mask)[0][0]\n\n to_concat.append(np.repeat(i, len(index)))\n codes_list.append(np.concatenate(to_concat))\n\n concat_index = _concat_indexes(indexes)\n\n # these go at the end\n if isinstance(concat_index, MultiIndex):\n levels.extend(concat_index.levels)\n codes_list.extend(concat_index.codes)\n else:\n codes, categories = factorize_from_iterable(concat_index)\n levels.append(categories)\n codes_list.append(codes)\n\n if len(names) == len(levels):\n names = list(names)\n else:\n # make sure that all of the passed indices have the same nlevels\n if not len({idx.nlevels for idx in indexes}) == 1:\n raise AssertionError(\n "Cannot concat indices that do not have the same number of levels"\n )\n\n # also copies\n names = list(names) + list(get_unanimous_names(*indexes))\n\n return MultiIndex(\n levels=levels, codes=codes_list, names=names, verify_integrity=False\n )\n\n new_index = indexes[0]\n n = len(new_index)\n kpieces = len(indexes)\n\n # also copies\n new_names = list(names)\n new_levels = list(levels)\n\n # construct codes\n new_codes = []\n\n # do something a bit more speedy\n\n for hlevel, level in zip(zipped, levels):\n hlevel_index = ensure_index(hlevel)\n mapped = level.get_indexer(hlevel_index)\n\n mask = mapped == -1\n if mask.any():\n raise ValueError(\n f"Values not found in passed level: {hlevel_index[mask]!s}"\n )\n\n new_codes.append(np.repeat(mapped, n))\n\n if isinstance(new_index, MultiIndex):\n new_levels.extend(new_index.levels)\n new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes])\n else:\n new_levels.append(new_index.unique())\n single_codes = new_index.unique().get_indexer(new_index)\n new_codes.append(np.tile(single_codes, kpieces))\n\n if len(new_names) < len(new_levels):\n new_names.extend(new_index.names)\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n | .venv\Lib\site-packages\pandas\core\reshape\concat.py | concat.py | Python 
| 28,253 | 0.95 | 0.129754 | 0.064901 | vue-tools | 660 | 2024-09-09T13:54:53.351457 | BSD-3-Clause | false | a811279e631e1b4e76188293024cba69 |
from __future__ import annotations\n\nfrom collections import defaultdict\nfrom collections.abc import (\n Hashable,\n Iterable,\n)\nimport itertools\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._libs import missing as libmissing\nfrom pandas._libs.sparse import IntIndex\n\nfrom pandas.core.dtypes.common import (\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n ArrowDtype,\n CategoricalDtype,\n)\n\nfrom pandas.core.arrays import SparseArray\nfrom pandas.core.arrays.categorical import factorize_from_iterable\nfrom pandas.core.arrays.string_ import StringDtype\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.indexes.api import (\n Index,\n default_index,\n)\nfrom pandas.core.series import Series\n\nif TYPE_CHECKING:\n from pandas._typing import NpDtype\n\n\ndef get_dummies(\n data,\n prefix=None,\n prefix_sep: str | Iterable[str] | dict[str, str] = "_",\n dummy_na: bool = False,\n columns=None,\n sparse: bool = False,\n drop_first: bool = False,\n dtype: NpDtype | None = None,\n) -> DataFrame:\n """\n Convert categorical variable into dummy/indicator variables.\n\n Each variable is converted in as many 0/1 variables as there are different\n values. Columns in the output are each named after a value; if the input is\n a DataFrame, the name of the original variable is prepended to the value.\n\n Parameters\n ----------\n data : array-like, Series, or DataFrame\n Data of which to get dummy indicators.\n prefix : str, list of str, or dict of str, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : str, default '_'\n If appending prefix, separator/delimiter to use. 
Or pass a\n list or dictionary as with `prefix`.\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `object`, `string`, or `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy-encoded columns should be backed by\n a :class:`SparseArray` (True) or a regular NumPy array (False).\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n dtype : dtype, default bool\n Data type for new columns. Only a single dtype is allowed.\n\n Returns\n -------\n DataFrame\n Dummy-coded data. If `data` contains other columns than the\n dummy-coded one(s), these will be prepended, unaltered, to the result.\n\n See Also\n --------\n Series.str.get_dummies : Convert Series of strings to dummy codes.\n :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``.\n\n Notes\n -----\n Reference :ref:`the user guide <reshaping.dummies>` for more examples.\n\n Examples\n --------\n >>> s = pd.Series(list('abca'))\n\n >>> pd.get_dummies(s)\n a b c\n 0 True False False\n 1 False True False\n 2 False False True\n 3 True False False\n\n >>> s1 = ['a', 'b', np.nan]\n\n >>> pd.get_dummies(s1)\n a b\n 0 True False\n 1 False True\n 2 False False\n\n >>> pd.get_dummies(s1, dummy_na=True)\n a b NaN\n 0 True False False\n 1 False True False\n 2 False False True\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],\n ... 
'C': [1, 2, 3]})\n\n >>> pd.get_dummies(df, prefix=['col1', 'col2'])\n C col1_a col1_b col2_a col2_b col2_c\n 0 1 True False False True False\n 1 2 False True True False False\n 2 3 True False False False True\n\n >>> pd.get_dummies(pd.Series(list('abcaa')))\n a b c\n 0 True False False\n 1 False True False\n 2 False False True\n 3 True False False\n 4 True False False\n\n >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)\n b c\n 0 False False\n 1 True False\n 2 False True\n 3 False False\n 4 False False\n\n >>> pd.get_dummies(pd.Series(list('abc')), dtype=float)\n a b c\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n """\n from pandas.core.reshape.concat import concat\n\n dtypes_to_encode = ["object", "string", "category"]\n\n if isinstance(data, DataFrame):\n # determine columns being encoded\n if columns is None:\n data_to_encode = data.select_dtypes(include=dtypes_to_encode)\n elif not is_list_like(columns):\n raise TypeError("Input must be a list-like for parameter `columns`")\n else:\n data_to_encode = data[columns]\n\n # validate prefixes and separator to avoid silently dropping cols\n def check_len(item, name: str):\n if is_list_like(item):\n if not len(item) == data_to_encode.shape[1]:\n len_msg = (\n f"Length of '{name}' ({len(item)}) did not match the "\n "length of the columns being encoded "\n f"({data_to_encode.shape[1]})."\n )\n raise ValueError(len_msg)\n\n check_len(prefix, "prefix")\n check_len(prefix_sep, "prefix_sep")\n\n if isinstance(prefix, str):\n prefix = itertools.cycle([prefix])\n if isinstance(prefix, dict):\n prefix = [prefix[col] for col in data_to_encode.columns]\n\n if prefix is None:\n prefix = data_to_encode.columns\n\n # validate separators\n if isinstance(prefix_sep, str):\n prefix_sep = itertools.cycle([prefix_sep])\n elif isinstance(prefix_sep, dict):\n prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]\n\n with_dummies: list[DataFrame]\n if data_to_encode.shape == data.shape:\n # Encoding the entire 
df, do not prepend any dropped columns\n with_dummies = []\n elif columns is not None:\n # Encoding only cols specified in columns. Get all cols not in\n # columns to prepend to result.\n with_dummies = [data.drop(columns, axis=1)]\n else:\n # Encoding only object and category dtype columns. Get remaining\n # columns to prepend to result.\n with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]\n\n for col, pre, sep in zip(data_to_encode.items(), prefix, prefix_sep):\n # col is (column_name, column), use just column data here\n dummy = _get_dummies_1d(\n col[1],\n prefix=pre,\n prefix_sep=sep,\n dummy_na=dummy_na,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n )\n with_dummies.append(dummy)\n result = concat(with_dummies, axis=1)\n else:\n result = _get_dummies_1d(\n data,\n prefix,\n prefix_sep,\n dummy_na,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n )\n return result\n\n\ndef _get_dummies_1d(\n data,\n prefix,\n prefix_sep: str | Iterable[str] | dict[str, str] = "_",\n dummy_na: bool = False,\n sparse: bool = False,\n drop_first: bool = False,\n dtype: NpDtype | None = None,\n) -> DataFrame:\n from pandas.core.reshape.concat import concat\n\n # Series avoids inconsistent NaN handling\n codes, levels = factorize_from_iterable(Series(data, copy=False))\n\n if dtype is None and hasattr(data, "dtype"):\n input_dtype = data.dtype\n if isinstance(input_dtype, CategoricalDtype):\n input_dtype = input_dtype.categories.dtype\n\n if isinstance(input_dtype, ArrowDtype):\n import pyarrow as pa\n\n dtype = ArrowDtype(pa.bool_()) # type: ignore[assignment]\n elif (\n isinstance(input_dtype, StringDtype)\n and input_dtype.na_value is libmissing.NA\n ):\n dtype = pandas_dtype("boolean") # type: ignore[assignment]\n else:\n dtype = np.dtype(bool)\n elif dtype is None:\n dtype = np.dtype(bool)\n\n _dtype = pandas_dtype(dtype)\n\n if is_object_dtype(_dtype):\n raise ValueError("dtype=object is not a valid dtype for get_dummies")\n\n def 
get_empty_frame(data) -> DataFrame:\n index: Index | np.ndarray\n if isinstance(data, Series):\n index = data.index\n else:\n index = default_index(len(data))\n return DataFrame(index=index)\n\n # if all NaN\n if not dummy_na and len(levels) == 0:\n return get_empty_frame(data)\n\n codes = codes.copy()\n if dummy_na:\n codes[codes == -1] = len(levels)\n levels = levels.insert(len(levels), np.nan)\n\n # if dummy_na, we just fake a nan level. drop_first will drop it again\n if drop_first and len(levels) == 1:\n return get_empty_frame(data)\n\n number_of_cols = len(levels)\n\n if prefix is None:\n dummy_cols = levels\n else:\n dummy_cols = Index([f"{prefix}{prefix_sep}{level}" for level in levels])\n\n index: Index | None\n if isinstance(data, Series):\n index = data.index\n else:\n index = None\n\n if sparse:\n fill_value: bool | float\n if is_integer_dtype(dtype):\n fill_value = 0\n elif dtype == np.dtype(bool):\n fill_value = False\n else:\n fill_value = 0.0\n\n sparse_series = []\n N = len(data)\n sp_indices: list[list] = [[] for _ in range(len(dummy_cols))]\n mask = codes != -1\n codes = codes[mask]\n n_idx = np.arange(N)[mask]\n\n for ndx, code in zip(n_idx, codes):\n sp_indices[code].append(ndx)\n\n if drop_first:\n # remove first categorical level to avoid perfect collinearity\n # GH12042\n sp_indices = sp_indices[1:]\n dummy_cols = dummy_cols[1:]\n for col, ixs in zip(dummy_cols, sp_indices):\n sarr = SparseArray(\n np.ones(len(ixs), dtype=dtype),\n sparse_index=IntIndex(N, ixs),\n fill_value=fill_value,\n dtype=dtype,\n )\n sparse_series.append(Series(data=sarr, index=index, name=col, copy=False))\n\n return concat(sparse_series, axis=1, copy=False)\n\n else:\n # ensure ndarray layout is column-major\n shape = len(codes), number_of_cols\n dummy_dtype: NpDtype\n if isinstance(_dtype, np.dtype):\n dummy_dtype = _dtype\n else:\n dummy_dtype = np.bool_\n dummy_mat = np.zeros(shape=shape, dtype=dummy_dtype, order="F")\n dummy_mat[np.arange(len(codes)), codes] = 
1\n\n if not dummy_na:\n # reset NaN GH4446\n dummy_mat[codes == -1] = 0\n\n if drop_first:\n # remove first GH12042\n dummy_mat = dummy_mat[:, 1:]\n dummy_cols = dummy_cols[1:]\n return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype)\n\n\ndef from_dummies(\n data: DataFrame,\n sep: None | str = None,\n default_category: None | Hashable | dict[str, Hashable] = None,\n) -> DataFrame:\n """\n Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables.\n\n Inverts the operation performed by :func:`~pandas.get_dummies`.\n\n .. versionadded:: 1.5.0\n\n Parameters\n ----------\n data : DataFrame\n Data which contains dummy-coded variables in form of integer columns of\n 1's and 0's.\n sep : str, default None\n Separator used in the column names of the dummy categories they are\n character indicating the separation of the categorical names from the prefixes.\n For example, if your column names are 'prefix_A' and 'prefix_B',\n you can strip the underscore by specifying sep='_'.\n default_category : None, Hashable or dict of Hashables, default None\n The default category is the implied category when a value has none of the\n listed categories specified with a one, i.e. if all dummies in a row are\n zero. 
Can be a single value for all variables or a dict directly mapping\n the default categories to a prefix of a variable.\n\n Returns\n -------\n DataFrame\n Categorical data decoded from the dummy input-data.\n\n Raises\n ------\n ValueError\n * When the input ``DataFrame`` ``data`` contains NA values.\n * When the input ``DataFrame`` ``data`` contains column names with separators\n that do not match the separator specified with ``sep``.\n * When a ``dict`` passed to ``default_category`` does not include an implied\n category for each prefix.\n * When a value in ``data`` has more than one category assigned to it.\n * When ``default_category=None`` and a value in ``data`` has no category\n assigned to it.\n TypeError\n * When the input ``data`` is not of type ``DataFrame``.\n * When the input ``DataFrame`` ``data`` contains non-dummy data.\n * When the passed ``sep`` is of a wrong data type.\n * When the passed ``default_category`` is of a wrong data type.\n\n See Also\n --------\n :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes.\n :class:`~pandas.Categorical` : Represent a categorical variable in classic.\n\n Notes\n -----\n The columns of the passed dummy data should only include 1's and 0's,\n or boolean values.\n\n Examples\n --------\n >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0],\n ... "c": [0, 0, 1, 0]})\n\n >>> df\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n >>> pd.from_dummies(df)\n 0 a\n 1 b\n 2 c\n 3 a\n\n >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0],\n ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],\n ... "col2_c": [0, 0, 1]})\n\n >>> df\n col1_a col1_b col2_a col2_b col2_c\n 0 1 0 0 1 0\n 1 0 1 1 0 0\n 2 1 0 0 0 1\n\n >>> pd.from_dummies(df, sep="_")\n col1 col2\n 0 a b\n 1 b a\n 2 a c\n\n >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0],\n ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],\n ... 
"col2_c": [0, 0, 0]})\n\n >>> df\n col1_a col1_b col2_a col2_b col2_c\n 0 1 0 0 1 0\n 1 0 1 1 0 0\n 2 0 0 0 0 0\n\n >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"})\n col1 col2\n 0 a b\n 1 b a\n 2 d e\n """\n from pandas.core.reshape.concat import concat\n\n if not isinstance(data, DataFrame):\n raise TypeError(\n "Expected 'data' to be a 'DataFrame'; "\n f"Received 'data' of type: {type(data).__name__}"\n )\n\n col_isna_mask = cast(Series, data.isna().any())\n\n if col_isna_mask.any():\n raise ValueError(\n "Dummy DataFrame contains NA value in column: "\n f"'{col_isna_mask.idxmax()}'"\n )\n\n # index data with a list of all columns that are dummies\n try:\n data_to_decode = data.astype("boolean", copy=False)\n except TypeError:\n raise TypeError("Passed DataFrame contains non-dummy data")\n\n # collect prefixes and get lists to slice data for each prefix\n variables_slice = defaultdict(list)\n if sep is None:\n variables_slice[""] = list(data.columns)\n elif isinstance(sep, str):\n for col in data_to_decode.columns:\n prefix = col.split(sep)[0]\n if len(prefix) == len(col):\n raise ValueError(f"Separator not specified for column: {col}")\n variables_slice[prefix].append(col)\n else:\n raise TypeError(\n "Expected 'sep' to be of type 'str' or 'None'; "\n f"Received 'sep' of type: {type(sep).__name__}"\n )\n\n if default_category is not None:\n if isinstance(default_category, dict):\n if not len(default_category) == len(variables_slice):\n len_msg = (\n f"Length of 'default_category' ({len(default_category)}) "\n f"did not match the length of the columns being encoded "\n f"({len(variables_slice)})"\n )\n raise ValueError(len_msg)\n elif isinstance(default_category, Hashable):\n default_category = dict(\n zip(variables_slice, [default_category] * len(variables_slice))\n )\n else:\n raise TypeError(\n "Expected 'default_category' to be of type "\n "'None', 'Hashable', or 'dict'; "\n "Received 'default_category' of type: "\n 
f"{type(default_category).__name__}"\n )\n\n cat_data = {}\n for prefix, prefix_slice in variables_slice.items():\n if sep is None:\n cats = prefix_slice.copy()\n else:\n cats = [col[len(prefix + sep) :] for col in prefix_slice]\n assigned = data_to_decode.loc[:, prefix_slice].sum(axis=1)\n if any(assigned > 1):\n raise ValueError(\n "Dummy DataFrame contains multi-assignment(s); "\n f"First instance in row: {assigned.idxmax()}"\n )\n if any(assigned == 0):\n if isinstance(default_category, dict):\n cats.append(default_category[prefix])\n else:\n raise ValueError(\n "Dummy DataFrame contains unassigned value(s); "\n f"First instance in row: {assigned.idxmin()}"\n )\n data_slice = concat(\n (data_to_decode.loc[:, prefix_slice], assigned == 0), axis=1\n )\n else:\n data_slice = data_to_decode.loc[:, prefix_slice]\n cats_array = data._constructor_sliced(cats, dtype=data.columns.dtype)\n # get indices of True entries along axis=1\n true_values = data_slice.idxmax(axis=1)\n indexer = data_slice.columns.get_indexer_for(true_values)\n cat_data[prefix] = cats_array.take(indexer).set_axis(data.index)\n\n result = DataFrame(cat_data)\n if sep is not None:\n result.columns = result.columns.astype(data.columns.dtype)\n return result\n | .venv\Lib\site-packages\pandas\core\reshape\encoding.py | encoding.py | Python | 19,016 | 0.95 | 0.122592 | 0.059184 | react-lib | 275 | 2024-06-14T04:01:41.233803 | GPL-3.0 | false | eb754ef20eb52acfa7b86e3a727c020e |
from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.missing import notna\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.indexes.api import MultiIndex\nfrom pandas.core.reshape.concat import concat\nfrom pandas.core.reshape.util import tile_compat\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.core.tools.numeric import to_numeric\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n\n from pandas._typing import AnyArrayLike\n\n from pandas import DataFrame\n\n\ndef ensure_list_vars(arg_vars, variable: str, columns) -> list:\n if arg_vars is not None:\n if not is_list_like(arg_vars):\n return [arg_vars]\n elif isinstance(columns, MultiIndex) and not isinstance(arg_vars, list):\n raise ValueError(\n f"{variable} must be a list of tuples when columns are a MultiIndex"\n )\n else:\n return list(arg_vars)\n else:\n return []\n\n\n@Appender(_shared_docs["melt"] % {"caller": "pd.melt(df, ", "other": "DataFrame.melt"})\ndef melt(\n frame: DataFrame,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name: Hashable = "value",\n col_level=None,\n ignore_index: bool = True,\n) -> DataFrame:\n if value_name in frame.columns:\n raise ValueError(\n f"value_name ({value_name}) cannot match an element in "\n "the DataFrame columns."\n )\n id_vars = ensure_list_vars(id_vars, "id_vars", frame.columns)\n value_vars_was_not_none = value_vars is not None\n value_vars = ensure_list_vars(value_vars, "value_vars", frame.columns)\n\n if id_vars or value_vars:\n if col_level is not None:\n level = frame.columns.get_level_values(col_level)\n else:\n level = frame.columns\n labels = id_vars + value_vars\n idx = level.get_indexer_for(labels)\n missing = idx == -1\n if missing.any():\n missing_labels = [\n lab for lab, 
not_found in zip(labels, missing) if not_found\n ]\n raise KeyError(\n "The following id_vars or value_vars are not present in "\n f"the DataFrame: {missing_labels}"\n )\n if value_vars_was_not_none:\n frame = frame.iloc[:, algos.unique(idx)]\n else:\n frame = frame.copy()\n else:\n frame = frame.copy()\n\n if col_level is not None: # allow list or other?\n # frame is a copy\n frame.columns = frame.columns.get_level_values(col_level)\n\n if var_name is None:\n if isinstance(frame.columns, MultiIndex):\n if len(frame.columns.names) == len(set(frame.columns.names)):\n var_name = frame.columns.names\n else:\n var_name = [f"variable_{i}" for i in range(len(frame.columns.names))]\n else:\n var_name = [\n frame.columns.name if frame.columns.name is not None else "variable"\n ]\n elif is_list_like(var_name):\n raise ValueError(f"{var_name=} must be a scalar.")\n else:\n var_name = [var_name]\n\n num_rows, K = frame.shape\n num_cols_adjusted = K - len(id_vars)\n\n mdata: dict[Hashable, AnyArrayLike] = {}\n for col in id_vars:\n id_data = frame.pop(col)\n if not isinstance(id_data.dtype, np.dtype):\n # i.e. ExtensionDtype\n if num_cols_adjusted > 0:\n mdata[col] = concat([id_data] * num_cols_adjusted, ignore_index=True)\n else:\n # We can't concat empty list. 
(GH 46044)\n mdata[col] = type(id_data)([], name=id_data.name, dtype=id_data.dtype)\n else:\n mdata[col] = np.tile(id_data._values, num_cols_adjusted)\n\n mcolumns = id_vars + var_name + [value_name]\n\n if frame.shape[1] > 0 and not any(\n not isinstance(dt, np.dtype) and dt._supports_2d for dt in frame.dtypes\n ):\n mdata[value_name] = concat(\n [frame.iloc[:, i] for i in range(frame.shape[1])]\n ).values\n else:\n mdata[value_name] = frame._values.ravel("F")\n for i, col in enumerate(var_name):\n mdata[col] = frame.columns._get_level_values(i).repeat(num_rows)\n\n result = frame._constructor(mdata, columns=mcolumns)\n\n if not ignore_index:\n result.index = tile_compat(frame.index, num_cols_adjusted)\n\n return result\n\n\ndef lreshape(data: DataFrame, groups: dict, dropna: bool = True) -> DataFrame:\n """\n Reshape wide-format data to long. Generalized inverse of DataFrame.pivot.\n\n Accepts a dictionary, ``groups``, in which each key is a new column name\n and each value is a list of old column names that will be "melted" under\n the new column name as part of the reshape.\n\n Parameters\n ----------\n data : DataFrame\n The wide-format DataFrame.\n groups : dict\n {new_name : list_of_columns}.\n dropna : bool, default True\n Do not include columns whose entries are all NaN.\n\n Returns\n -------\n DataFrame\n Reshaped DataFrame.\n\n See Also\n --------\n melt : Unpivot a DataFrame from wide to long format, optionally leaving\n identifiers set.\n pivot : Create a spreadsheet-style pivot table as a DataFrame.\n DataFrame.pivot : Pivot without aggregation that can handle\n non-numeric data.\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n DataFrame.unstack : Pivot based on the index values instead of a\n column.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Examples\n --------\n >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],\n ... 
'team': ['Red Sox', 'Yankees'],\n ... 'year1': [2007, 2007], 'year2': [2008, 2008]})\n >>> data\n hr1 hr2 team year1 year2\n 0 514 545 Red Sox 2007 2008\n 1 573 526 Yankees 2007 2008\n\n >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})\n team year hr\n 0 Red Sox 2007 514\n 1 Yankees 2007 573\n 2 Red Sox 2008 545\n 3 Yankees 2008 526\n """\n mdata = {}\n pivot_cols = []\n all_cols: set[Hashable] = set()\n K = len(next(iter(groups.values())))\n for target, names in groups.items():\n if len(names) != K:\n raise ValueError("All column lists must be same length")\n to_concat = [data[col]._values for col in names]\n\n mdata[target] = concat_compat(to_concat)\n pivot_cols.append(target)\n all_cols = all_cols.union(names)\n\n id_cols = list(data.columns.difference(all_cols))\n for col in id_cols:\n mdata[col] = np.tile(data[col]._values, K)\n\n if dropna:\n mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)\n for c in pivot_cols:\n mask &= notna(mdata[c])\n if not mask.all():\n mdata = {k: v[mask] for k, v in mdata.items()}\n\n return data._constructor(mdata, columns=id_cols + pivot_cols)\n\n\ndef wide_to_long(\n df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+"\n) -> DataFrame:\n r"""\n Unpivot a DataFrame from wide to long format.\n\n Less flexible but more user-friendly than melt.\n\n With stubnames ['A', 'B'], this function expects to find one or more\n group of columns with format\n A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...\n You specify what you want to call this suffix in the resulting long format\n with `j` (for example `j='year'`)\n\n Each row of these wide variables are assumed to be uniquely identified by\n `i` (can be a single column name or a list of column names)\n\n All remaining variables in the data frame are left intact.\n\n Parameters\n ----------\n df : DataFrame\n The wide-format DataFrame.\n stubnames : str or list-like\n The stub name(s). 
The wide format variables are assumed to\n start with the stub names.\n i : str or list-like\n Column(s) to use as id variable(s).\n j : str\n The name of the sub-observation variable. What you wish to name your\n suffix in the long format.\n sep : str, default ""\n A character indicating the separation of the variable names\n in the wide format, to be stripped from the names in the long format.\n For example, if your column names are A-suffix1, A-suffix2, you\n can strip the hyphen by specifying `sep='-'`.\n suffix : str, default '\\d+'\n A regular expression capturing the wanted suffixes. '\\d+' captures\n numeric suffixes. Suffixes with no numbers could be specified with the\n negated character class '\\D+'. You can also further disambiguate\n suffixes, for example, if your wide variables are of the form A-one,\n B-two,.., and you have an unrelated column A-rating, you can ignore the\n last one by specifying `suffix='(!?one|two)'`. When all suffixes are\n numeric, they are cast to int64/float64.\n\n Returns\n -------\n DataFrame\n A DataFrame that contains each stub name as a variable, with new index\n (i, j).\n\n See Also\n --------\n melt : Unpivot a DataFrame from wide to long format, optionally leaving\n identifiers set.\n pivot : Create a spreadsheet-style pivot table as a DataFrame.\n DataFrame.pivot : Pivot without aggregation that can handle\n non-numeric data.\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n DataFrame.unstack : Pivot based on the index values instead of a\n column.\n\n Notes\n -----\n All extra variables are left untouched. This simply uses\n `pandas.melt` under the hood, but is hard-coded to "do the right thing"\n in a typical case.\n\n Examples\n --------\n >>> np.random.seed(123)\n >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},\n ... "A1980" : {0 : "d", 1 : "e", 2 : "f"},\n ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},\n ... 
"B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},\n ... "X" : dict(zip(range(3), np.random.randn(3)))\n ... })\n >>> df["id"] = df.index\n >>> df\n A1970 A1980 B1970 B1980 X id\n 0 a d 2.5 3.2 -1.085631 0\n 1 b e 1.2 1.3 0.997345 1\n 2 c f 0.7 0.1 0.282978 2\n >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")\n ... # doctest: +NORMALIZE_WHITESPACE\n X A B\n id year\n 0 1970 -1.085631 a 2.5\n 1 1970 0.997345 b 1.2\n 2 1970 0.282978 c 0.7\n 0 1980 -1.085631 d 3.2\n 1 1980 0.997345 e 1.3\n 2 1980 0.282978 f 0.1\n\n With multiple id columns\n\n >>> df = pd.DataFrame({\n ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],\n ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],\n ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]\n ... })\n >>> df\n famid birth ht1 ht2\n 0 1 1 2.8 3.4\n 1 1 2 2.9 3.8\n 2 1 3 2.2 2.9\n 3 2 1 2.0 3.2\n 4 2 2 1.8 2.8\n 5 2 3 1.9 2.4\n 6 3 1 2.2 3.3\n 7 3 2 2.3 3.4\n 8 3 3 2.1 2.9\n >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')\n >>> l\n ... # doctest: +NORMALIZE_WHITESPACE\n ht\n famid birth age\n 1 1 1 2.8\n 2 3.4\n 2 1 2.9\n 2 3.8\n 3 1 2.2\n 2 2.9\n 2 1 1 2.0\n 2 3.2\n 2 1 1.8\n 2 2.8\n 3 1 1.9\n 2 2.4\n 3 1 1 2.2\n 2 3.3\n 2 1 2.3\n 2 3.4\n 3 1 2.1\n 2 2.9\n\n Going from long back to wide just takes some creative use of `unstack`\n\n >>> w = l.unstack()\n >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)\n >>> w.reset_index()\n famid birth ht1 ht2\n 0 1 1 2.8 3.4\n 1 1 2 2.9 3.8\n 2 1 3 2.2 2.9\n 3 2 1 2.0 3.2\n 4 2 2 1.8 2.8\n 5 2 3 1.9 2.4\n 6 3 1 2.2 3.3\n 7 3 2 2.3 3.4\n 8 3 3 2.1 2.9\n\n Less wieldy column names are also handled\n\n >>> np.random.seed(0)\n >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3),\n ... 'A(weekly)-2011': np.random.rand(3),\n ... 'B(weekly)-2010': np.random.rand(3),\n ... 'B(weekly)-2011': np.random.rand(3),\n ... 
'X' : np.random.randint(3, size=3)})\n >>> df['id'] = df.index\n >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id\n 0 0.548814 0.544883 0.437587 0.383442 0 0\n 1 0.715189 0.423655 0.891773 0.791725 1 1\n 2 0.602763 0.645894 0.963663 0.528895 1 2\n\n >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id',\n ... j='year', sep='-')\n ... # doctest: +NORMALIZE_WHITESPACE\n X A(weekly) B(weekly)\n id year\n 0 2010 0 0.548814 0.437587\n 1 2010 1 0.715189 0.891773\n 2 2010 1 0.602763 0.963663\n 0 2011 0 0.544883 0.383442\n 1 2011 1 0.423655 0.791725\n 2 2011 1 0.645894 0.528895\n\n If we have many columns, we could also use a regex to find our\n stubnames and pass that list on to wide_to_long\n\n >>> stubnames = sorted(\n ... set([match[0] for match in df.columns.str.findall(\n ... r'[A-B]\(.*\)').values if match != []])\n ... )\n >>> list(stubnames)\n ['A(weekly)', 'B(weekly)']\n\n All of the above examples have integers as suffixes. It is possible to\n have non-integers as suffixes.\n\n >>> df = pd.DataFrame({\n ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],\n ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],\n ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]\n ... })\n >>> df\n famid birth ht_one ht_two\n 0 1 1 2.8 3.4\n 1 1 2 2.9 3.8\n 2 1 3 2.2 2.9\n 3 2 1 2.0 3.2\n 4 2 2 1.8 2.8\n 5 2 3 1.9 2.4\n 6 3 1 2.2 3.3\n 7 3 2 2.3 3.4\n 8 3 3 2.1 2.9\n\n >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',\n ... sep='_', suffix=r'\w+')\n >>> l\n ... 
# doctest: +NORMALIZE_WHITESPACE\n ht\n famid birth age\n 1 1 one 2.8\n two 3.4\n 2 one 2.9\n two 3.8\n 3 one 2.2\n two 2.9\n 2 1 one 2.0\n two 3.2\n 2 one 1.8\n two 2.8\n 3 one 1.9\n two 2.4\n 3 1 one 2.2\n two 3.3\n 2 one 2.3\n two 3.4\n 3 one 2.1\n two 2.9\n """\n\n def get_var_names(df, stub: str, sep: str, suffix: str):\n regex = rf"^{re.escape(stub)}{re.escape(sep)}{suffix}$"\n return df.columns[df.columns.str.match(regex)]\n\n def melt_stub(df, stub: str, i, j, value_vars, sep: str):\n newdf = melt(\n df,\n id_vars=i,\n value_vars=value_vars,\n value_name=stub.rstrip(sep),\n var_name=j,\n )\n newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "", regex=True)\n\n # GH17627 Cast numerics suffixes to int/float\n try:\n newdf[j] = to_numeric(newdf[j])\n except (TypeError, ValueError, OverflowError):\n # TODO: anything else to catch?\n pass\n\n return newdf.set_index(i + [j])\n\n if not is_list_like(stubnames):\n stubnames = [stubnames]\n else:\n stubnames = list(stubnames)\n\n if df.columns.isin(stubnames).any():\n raise ValueError("stubname can't be identical to a column name")\n\n if not is_list_like(i):\n i = [i]\n else:\n i = list(i)\n\n if df[i].duplicated().any():\n raise ValueError("the id variables need to uniquely identify each row")\n\n _melted = []\n value_vars_flattened = []\n for stub in stubnames:\n value_var = get_var_names(df, stub, sep, suffix)\n value_vars_flattened.extend(value_var)\n _melted.append(melt_stub(df, stub, i, j, value_var, sep))\n\n melted = concat(_melted, axis=1)\n id_vars = df.columns.difference(value_vars_flattened)\n new = df[id_vars]\n\n if len(i) == 1:\n return new.set_index(i).join(melted)\n else:\n return new.merge(melted.reset_index(), on=i).set_index(i + [j])\n | .venv\Lib\site-packages\pandas\core\reshape\melt.py | melt.py | Python | 17,400 | 0.95 | 0.109375 | 0.011261 | react-lib | 398 | 2024-08-23T19:22:00.678280 | GPL-3.0 | false | 7351f43a402c5acf5a27781a38e7c15b |
"""\nSQL-style merge routines\n"""\nfrom __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Sequence,\n)\nimport datetime\nfrom functools import partial\nfrom typing import (\n TYPE_CHECKING,\n Literal,\n cast,\n final,\n)\nimport uuid\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n Timedelta,\n hashtable as libhashtable,\n join as libjoin,\n lib,\n)\nfrom pandas._libs.lib import is_range_indexer\nfrom pandas._typing import (\n AnyArrayLike,\n ArrayLike,\n IndexLabel,\n JoinHow,\n MergeHow,\n Shape,\n Suffixes,\n npt,\n)\nfrom pandas.errors import MergeError\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n cache_readonly,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.base import ExtensionDtype\nfrom pandas.core.dtypes.cast import find_common_type\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n is_bool,\n is_bool_dtype,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_number,\n is_numeric_dtype,\n is_object_dtype,\n is_string_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n na_value_for_dtype,\n)\n\nfrom pandas import (\n ArrowDtype,\n Categorical,\n Index,\n MultiIndex,\n Series,\n)\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import (\n ArrowExtensionArray,\n BaseMaskedArray,\n ExtensionArray,\n)\nfrom pandas.core.arrays.string_ import StringDtype\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n ensure_wrapped_if_datetimelike,\n extract_array,\n)\nfrom pandas.core.frame import _merge_doc\nfrom pandas.core.indexes.api import default_index\nfrom pandas.core.sorting import (\n get_group_index,\n is_int64_overflow_possible,\n)\n\nif TYPE_CHECKING:\n from pandas import 
@Substitution("\nleft : DataFrame or named Series")
@Appender(_merge_doc, indents=0)
def merge(
    left: DataFrame | Series,
    right: DataFrame | Series,
    how: MergeHow = "inner",
    on: IndexLabel | AnyArrayLike | None = None,
    left_on: IndexLabel | AnyArrayLike | None = None,
    right_on: IndexLabel | AnyArrayLike | None = None,
    left_index: bool = False,
    right_index: bool = False,
    sort: bool = False,
    suffixes: Suffixes = ("_x", "_y"),
    copy: bool | None = None,
    indicator: str | bool = False,
    validate: str | None = None,
) -> DataFrame:
    # Normalize Series inputs up front so the machinery below only ever
    # sees DataFrames.
    frame_left = _validate_operand(left)
    frame_right = _validate_operand(right)

    # Keyword arguments forwarded identically along both execution paths.
    common_kwargs = {
        "on": on,
        "left_on": left_on,
        "right_on": right_on,
        "left_index": left_index,
        "right_index": right_index,
        "sort": sort,
        "suffixes": suffixes,
        "indicator": indicator,
        "validate": validate,
    }

    if how == "cross":
        # Cross joins have no keys and are handled by a dedicated helper.
        return _cross_merge(frame_left, frame_right, copy=copy, **common_kwargs)

    op = _MergeOperation(frame_left, frame_right, how=how, **common_kwargs)
    return op.get_result(copy=copy)
def _cross_merge(
    left: DataFrame,
    right: DataFrame,
    on: IndexLabel | AnyArrayLike | None = None,
    left_on: IndexLabel | AnyArrayLike | None = None,
    right_on: IndexLabel | AnyArrayLike | None = None,
    left_index: bool = False,
    right_index: bool = False,
    sort: bool = False,
    suffixes: Suffixes = ("_x", "_y"),
    copy: bool | None = None,
    indicator: str | bool = False,
    validate: str | None = None,
) -> DataFrame:
    """
    See merge.__doc__ with how='cross'
    """
    # A cross join takes no join keys at all, so reject every key-related
    # argument before doing any work.
    key_arguments_given = (
        left_index
        or right_index
        or right_on is not None
        or left_on is not None
        or on is not None
    )
    if key_arguments_given:
        raise MergeError(
            "Can not pass on, right_on, left_on or set right_index=True or "
            "left_index=True"
        )

    # Implement the cross join as an inner join on a synthetic constant
    # column; the uuid makes a clash with an existing column name
    # practically impossible.
    cross_col = f"_cross_{uuid.uuid4()}"
    res = merge(
        left.assign(**{cross_col: 1}),
        right.assign(**{cross_col: 1}),
        how="inner",
        on=on,
        left_on=[cross_col],
        right_on=[cross_col],
        left_index=left_index,
        right_index=right_index,
        sort=sort,
        suffixes=suffixes,
        indicator=indicator,
        validate=validate,
        copy=copy,
    )
    # Drop the synthetic key before handing the result back.
    del res[cross_col]
    return res
def _groupby_and_merge(
    by, left: DataFrame | Series, right: DataFrame | Series, merge_pieces
):
    """
    groupby & merge; we are always performing a left-by type operation

    Each group of ``left`` is merged (via ``merge_pieces``) with the
    matching rows of ``right``; the merged pieces are concatenated back
    together in the original group order.

    Parameters
    ----------
    by: field to group
    left: DataFrame
    right: DataFrame
    merge_pieces: function for merging
    """
    pieces = []
    if not isinstance(by, (list, tuple)):
        by = [by]

    lby = left.groupby(by, sort=False)
    rby: groupby.DataFrameGroupBy | groupby.SeriesGroupBy | None = None

    # if we can groupby the rhs
    # then we can get vastly better perf
    if all(item in right.columns for item in by):
        rby = right.groupby(by, sort=False)

    # NOTE(review): iterates internal groupby machinery (_grouper /
    # _selected_obj) rather than the public API — order of groups follows
    # the original appearance order because sort=False above.
    for key, lhs in lby._grouper.get_iterator(lby._selected_obj, axis=lby.axis):
        if rby is None:
            rhs = right
        else:
            try:
                rhs = right.take(rby.indices[key])
            except KeyError:
                # key doesn't exist in the right groups: emit the left rows
                # with the right-only columns present but unfilled (all-NA)
                lcols = lhs.columns.tolist()
                cols = lcols + [r for r in right.columns if r not in set(lcols)]
                merged = lhs.reindex(columns=cols)
                merged.index = range(len(merged))
                pieces.append(merged)
                continue

        merged = merge_pieces(lhs, rhs)

        # make sure join keys are in the merged
        # TODO, should merge_pieces do this?
        merged[by] = key

        pieces.append(merged)

    # preserve the original order
    # if we have a missing piece this can be reset
    from pandas.core.reshape.concat import concat

    result = concat(pieces, ignore_index=True)
    # realign columns to the first piece's layout, since the all-NA
    # fallback pieces above may have a different column order
    result = result.reindex(columns=pieces[0].columns, copy=False)
    return result, lby
def merge_ordered(
    left: DataFrame | Series,
    right: DataFrame | Series,
    on: IndexLabel | None = None,
    left_on: IndexLabel | None = None,
    right_on: IndexLabel | None = None,
    left_by=None,
    right_by=None,
    fill_method: str | None = None,
    suffixes: Suffixes = ("_x", "_y"),
    how: JoinHow = "outer",
) -> DataFrame:
    """
    Perform a merge for ordered data with optional filling/interpolation.

    Designed for ordered data like time series data. Optionally
    perform group-wise merge (see examples).

    Parameters
    ----------
    left : DataFrame or named Series
    right : DataFrame or named Series
    on : label or list
        Field names to join on. Must be found in both DataFrames.
    left_on : label or list, or array-like
        Field names to join on in left DataFrame. Can be a vector or list of
        vectors of the length of the DataFrame to use a particular vector as
        the join key instead of columns.
    right_on : label or list, or array-like
        Field names to join on in right DataFrame or vector/list of vectors per
        left_on docs.
    left_by : column name or list of column names
        Group left DataFrame by group columns and merge piece by piece with
        right DataFrame. Must be None if either left or right are a Series.
    right_by : column name or list of column names
        Group right DataFrame by group columns and merge piece by piece with
        left DataFrame. Must be None if either left or right are a Series.
    fill_method : {'ffill', None}, default None
        Interpolation method for data.
    suffixes : list-like, default is ("_x", "_y")
        A length-2 sequence where each element is optionally a string
        indicating the suffix to add to overlapping column names in
        `left` and `right` respectively. Pass a value of `None` instead
        of a string to indicate that the column name from `left` or
        `right` should be left as-is, with no suffix. At least one of the
        values must not be None.

    how : {'left', 'right', 'outer', 'inner'}, default 'outer'
        * left: use only keys from left frame (SQL: left outer join)
        * right: use only keys from right frame (SQL: right outer join)
        * outer: use union of keys from both frames (SQL: full outer join)
        * inner: use intersection of keys from both frames (SQL: inner join).

    Returns
    -------
    DataFrame
        The merged DataFrame output type will be the same as
        'left', if it is a subclass of DataFrame.

    See Also
    --------
    merge : Merge with a database-style join.
    merge_asof : Merge on nearest keys.

    Examples
    --------
    >>> from pandas import merge_ordered
    >>> df1 = pd.DataFrame(
    ...     {
    ...         "key": ["a", "c", "e", "a", "c", "e"],
    ...         "lvalue": [1, 2, 3, 1, 2, 3],
    ...         "group": ["a", "a", "a", "b", "b", "b"]
    ...     }
    ... )
    >>> df1
      key  lvalue group
    0   a       1     a
    1   c       2     a
    2   e       3     a
    3   a       1     b
    4   c       2     b
    5   e       3     b

    >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
    >>> df2
      key  rvalue
    0   b       1
    1   c       2
    2   d       3

    >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group")
      key  lvalue group  rvalue
    0   a       1     a     NaN
    1   b       1     a     1.0
    2   c       2     a     2.0
    3   d       2     a     3.0
    4   e       3     a     3.0
    5   a       1     b     NaN
    6   b       1     b     1.0
    7   c       2     b     2.0
    8   d       2     b     3.0
    9   e       3     b     3.0
    """

    def _merger(x, y) -> DataFrame:
        # perform the ordered merge operation for one (left, right) pair
        op = _OrderedMerge(
            x,
            y,
            on=on,
            left_on=left_on,
            right_on=right_on,
            suffixes=suffixes,
            fill_method=fill_method,
            how=how,
        )
        return op.get_result()

    # At most one side may be grouped.
    if left_by is not None and right_by is not None:
        raise ValueError("Can only group either left or right frames")
    if left_by is not None:
        if isinstance(left_by, str):
            left_by = [left_by]
        check = set(left_by).difference(left.columns)
        if len(check) != 0:
            raise KeyError(f"{check} not found in left columns")
        # _merger already has signature (x, y); no lambda wrapper needed
        result, _ = _groupby_and_merge(left_by, left, right, _merger)
    elif right_by is not None:
        if isinstance(right_by, str):
            right_by = [right_by]
        check = set(right_by).difference(right.columns)
        if len(check) != 0:
            raise KeyError(f"{check} not found in right columns")
        # the grouped frame is `right` here, so swap the arguments back
        # before calling the (left, right)-ordered merger
        result, _ = _groupby_and_merge(
            right_by, right, left, lambda x, y: _merger(y, x)
        )
    else:
        result = _merger(left, right)
    return result
def merge_asof(
    left: DataFrame | Series,
    right: DataFrame | Series,
    on: IndexLabel | None = None,
    left_on: IndexLabel | None = None,
    right_on: IndexLabel | None = None,
    left_index: bool = False,
    right_index: bool = False,
    by=None,
    left_by=None,
    right_by=None,
    suffixes: Suffixes = ("_x", "_y"),
    tolerance: int | Timedelta | None = None,
    allow_exact_matches: bool = True,
    direction: str = "backward",
) -> DataFrame:
    """
    Perform a merge by key distance.

    This is similar to a left-join except that we match on nearest
    key rather than equal keys. Both DataFrames must be sorted by the key.

    For each row in the left DataFrame:

    - A "backward" search selects the last row in the right DataFrame whose
      'on' key is less than or equal to the left's key.
    - A "forward" search selects the first row in the right DataFrame whose
      'on' key is greater than or equal to the left's key.
    - A "nearest" search selects the row in the right DataFrame whose 'on'
      key is closest in absolute distance to the left's key.

    Optionally match on equivalent keys with 'by' before searching with 'on'.

    Parameters
    ----------
    left : DataFrame or named Series
    right : DataFrame or named Series
    on : label
        Field name to join on. Must be found in both DataFrames.
        The data MUST be ordered. Furthermore this must be a numeric column,
        such as datetimelike, integer, or float. On or left_on/right_on
        must be given.
    left_on : label
        Field name to join on in left DataFrame.
    right_on : label
        Field name to join on in right DataFrame.
    left_index : bool
        Use the index of the left DataFrame as the join key.
    right_index : bool
        Use the index of the right DataFrame as the join key.
    by : column name or list of column names
        Match on these columns before performing merge operation.
    left_by : column name
        Field names to match on in the left DataFrame.
    right_by : column name
        Field names to match on in the right DataFrame.
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively.
    tolerance : int or Timedelta, optional, default None
        Select asof tolerance within this range; must be compatible
        with the merge index.
    allow_exact_matches : bool, default True

        - If True, allow matching with the same 'on' value
          (i.e. less-than-or-equal-to / greater-than-or-equal-to)
        - If False, don't match the same 'on' value
          (i.e., strictly less-than / strictly greater-than).

    direction : 'backward' (default), 'forward', or 'nearest'
        Whether to search for prior, subsequent, or closest matches.

    Returns
    -------
    DataFrame

    See Also
    --------
    merge : Merge with a database-style join.
    merge_ordered : Merge with optional filling/interpolation.

    Examples
    --------
    >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
    >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})

    >>> pd.merge_asof(left, right, on="a")
        a left_val  right_val
    0   1        a          1
    1   5        b          3
    2  10        c          7

    >>> pd.merge_asof(left, right, on="a", direction="forward")
        a left_val  right_val
    0   1        a        1.0
    1   5        b        6.0
    2  10        c        NaN

    >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False)
        a left_val  right_val
    0   1        a        NaN
    1   5        b        3.0
    2  10        c        7.0
    """
    # All the work happens in the asof-specialized merge operation;
    # how="asof" behaves like a left join on the sorted key.
    return _AsOfMerge(
        left,
        right,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        by=by,
        left_by=left_by,
        right_by=right_by,
        suffixes=suffixes,
        how="asof",
        tolerance=tolerance,
        allow_exact_matches=allow_exact_matches,
        direction=direction,
    ).get_result()
    def __init__(
        self,
        left: DataFrame | Series,
        right: DataFrame | Series,
        how: JoinHow | Literal["asof"] = "inner",
        on: IndexLabel | AnyArrayLike | None = None,
        left_on: IndexLabel | AnyArrayLike | None = None,
        right_on: IndexLabel | AnyArrayLike | None = None,
        left_index: bool = False,
        right_index: bool = False,
        sort: bool = True,
        suffixes: Suffixes = ("_x", "_y"),
        indicator: str | bool = False,
        validate: str | None = None,
    ) -> None:
        # Coerce Series operands to DataFrames; keep the untouched originals
        # around (orig_left/orig_right) for later index-level restoration.
        _left = _validate_operand(left)
        _right = _validate_operand(right)
        self.left = self.orig_left = _left
        self.right = self.orig_right = _right
        self.how = how

        self.on = com.maybe_make_list(on)

        self.suffixes = suffixes
        # an outer merge always sorts the join keys
        self.sort = sort or how == "outer"

        self.left_index = left_index
        self.right_index = right_index

        self.indicator = indicator

        if not is_bool(left_index):
            raise ValueError(
                f"left_index parameter must be of type bool, not {type(left_index)}"
            )
        if not is_bool(right_index):
            raise ValueError(
                f"right_index parameter must be of type bool, not {type(right_index)}"
            )

        # GH 40993: raise when merging between different levels; enforced in 2.0
        if _left.columns.nlevels != _right.columns.nlevels:
            msg = (
                "Not allowed to merge between different levels. "
                f"({_left.columns.nlevels} levels on the left, "
                f"{_right.columns.nlevels} on the right)"
            )
            raise MergeError(msg)

        # Resolve on/left_on/right_on into concrete per-side key lists.
        self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on)

        # Must run before the drops below: the drop lists are a product of
        # key resolution.
        (
            self.left_join_keys,
            self.right_join_keys,
            self.join_names,
            left_drop,
            right_drop,
        ) = self._get_merge_keys()

        if left_drop:
            self.left = self.left._drop_labels_or_levels(left_drop)

        if right_drop:
            self.right = self.right._drop_labels_or_levels(right_drop)

        # Hooks overridden by _AsOfMerge; no-ops for a plain merge.
        self._maybe_require_matching_dtypes(self.left_join_keys, self.right_join_keys)
        self._validate_tolerance(self.left_join_keys)

        # validate the merge keys dtypes. We may need to coerce
        # to avoid incompatible dtypes
        self._maybe_coerce_merge_keys()

        # If argument passed to validate,
        # check if columns specified as unique
        # are in fact unique.
        if validate is not None:
            self._validate_validate_kwd(validate)
    def _maybe_require_matching_dtypes(
        self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike]
    ) -> None:
        # Overridden by AsOfMerge
        pass

    def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None:
        # Overridden by AsOfMerge
        pass

    @final
    def _reindex_and_concat(
        self,
        join_index: Index,
        left_indexer: npt.NDArray[np.intp] | None,
        right_indexer: npt.NDArray[np.intp] | None,
        copy: bool | None,
    ) -> DataFrame:
        """
        reindex along index and concat along columns.

        Rows of ``self.left``/``self.right`` are rearranged according to the
        indexers (``None`` or a no-op range indexer means "keep as-is"), both
        sides are given the suffixed column labels, and the two frames are
        glued together column-wise on ``join_index``.
        """
        # Take views so we do not alter the originals
        left = self.left[:]
        right = self.right[:]

        llabels, rlabels = _items_overlap_with_suffix(
            self.left._info_axis, self.right._info_axis, self.suffixes
        )

        if left_indexer is not None and not is_range_indexer(left_indexer, len(left)):
            # Pinning the index here (and in the right code just below) is not
            # necessary, but makes the `.take` more performant if we have e.g.
            # a MultiIndex for left.index.
            lmgr = left._mgr.reindex_indexer(
                join_index,
                left_indexer,
                axis=1,
                copy=False,
                only_slice=True,
                allow_dups=True,
                use_na_proxy=True,
            )
            left = left._constructor_from_mgr(lmgr, axes=lmgr.axes)
            left.index = join_index

        if right_indexer is not None and not is_range_indexer(
            right_indexer, len(right)
        ):
            # same reindex as above, for the right side
            rmgr = right._mgr.reindex_indexer(
                join_index,
                right_indexer,
                axis=1,
                copy=False,
                only_slice=True,
                allow_dups=True,
                use_na_proxy=True,
            )
            right = right._constructor_from_mgr(rmgr, axes=rmgr.axes)
            right.index = join_index

        from pandas import concat

        left.columns = llabels
        right.columns = rlabels
        result = concat([left, right], axis=1, copy=copy)
        return result
    def get_result(self, copy: bool | None = True) -> DataFrame:
        # Drive the whole merge: optional indicator set-up, compute the join
        # indexers, build the combined frame, then post-process.
        if self.indicator:
            self.left, self.right = self._indicator_pre_merge(self.left, self.right)

        join_index, left_indexer, right_indexer = self._get_join_info()

        result = self._reindex_and_concat(
            join_index, left_indexer, right_indexer, copy=copy
        )
        result = result.__finalize__(self, method=self._merge_type)

        if self.indicator:
            result = self._indicator_post_merge(result)

        # These two mutate `result` in place.
        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        self._maybe_restore_index_levels(result)

        return result.__finalize__(self, method="merge")

    @final
    @cache_readonly
    def _indicator_name(self) -> str | None:
        # Resolve the `indicator` argument into a column name:
        # a string is used verbatim, True means "_merge", False means none.
        if isinstance(self.indicator, str):
            return self.indicator
        elif isinstance(self.indicator, bool):
            return "_merge" if self.indicator else None
        else:
            raise ValueError(
                "indicator option can only accept boolean or string arguments"
            )

    @final
    def _indicator_pre_merge(
        self, left: DataFrame, right: DataFrame
    ) -> tuple[DataFrame, DataFrame]:
        # Tag each side with a temporary int8 marker column (1 = left,
        # 2 = right) so their sum after the merge identifies the origin.
        columns = left.columns.union(right.columns)

        for i in ["_left_indicator", "_right_indicator"]:
            if i in columns:
                raise ValueError(
                    "Cannot use `indicator=True` option when "
                    f"data contains a column named {i}"
                )
        if self._indicator_name in columns:
            raise ValueError(
                "Cannot use name of an existing column for indicator column"
            )

        # copy so the caller's frames are not mutated
        left = left.copy()
        right = right.copy()

        left["_left_indicator"] = 1
        left["_left_indicator"] = left["_left_indicator"].astype("int8")

        right["_right_indicator"] = 2
        right["_right_indicator"] = right["_right_indicator"].astype("int8")

        return left, right
    @final
    def _indicator_post_merge(self, result: DataFrame) -> DataFrame:
        # Unmatched rows carry NaN in one marker column; fill with 0 so the
        # sum is 1 (left only), 2 (right only) or 3 (both).
        result["_left_indicator"] = result["_left_indicator"].fillna(0)
        result["_right_indicator"] = result["_right_indicator"].fillna(0)

        result[self._indicator_name] = Categorical(
            (result["_left_indicator"] + result["_right_indicator"]),
            categories=[1, 2, 3],
        )
        result[self._indicator_name] = result[
            self._indicator_name
        ].cat.rename_categories(["left_only", "right_only", "both"])

        # the temporary marker columns are no longer needed
        result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1)
        return result

    @final
    def _maybe_restore_index_levels(self, result: DataFrame) -> None:
        """
        Restore index levels specified as `on` parameters

        Here we check for cases where `self.left_on` and `self.right_on` pairs
        each reference an index level in their respective DataFrames. The
        joined columns corresponding to these pairs are then restored to the
        index of `result`.

        **Note:** This method has side effects. It modifies `result` in-place

        Parameters
        ----------
        result: DataFrame
            merge result

        Returns
        -------
        None
        """
        names_to_restore = []
        for name, left_key, right_key in zip(
            self.join_names, self.left_on, self.right_on
        ):
            if (
                # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
                # type "Union[Hashable, ExtensionArray, Index, Series]"; expected
                # "Hashable"
                self.orig_left._is_level_reference(left_key)  # type: ignore[arg-type]
                # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
                # type "Union[Hashable, ExtensionArray, Index, Series]"; expected
                # "Hashable"
                and self.orig_right._is_level_reference(
                    right_key  # type: ignore[arg-type]
                )
                and left_key == right_key
                and name not in result.index.names
            ):
                names_to_restore.append(name)

        if names_to_restore:
            result.set_index(names_to_restore, inplace=True)
    @final
    def _maybe_add_join_keys(
        self,
        result: DataFrame,
        left_indexer: npt.NDArray[np.intp] | None,
        right_indexer: npt.NDArray[np.intp] | None,
    ) -> None:
        # Mutates `result` in place: fill in (or repair) the join-key
        # column/level values, combining left and right key values where one
        # side has missing (-1) indexer entries.
        left_has_missing = None
        right_has_missing = None

        assert all(isinstance(x, _known) for x in self.left_join_keys)

        keys = zip(self.join_names, self.left_on, self.right_on)
        for i, (name, lname, rname) in enumerate(keys):
            if not _should_fill(lname, rname):
                continue

            # take_left/take_right hold the source arrays (if any) the key
            # column must be rebuilt from; None means "use result as-is".
            take_left, take_right = None, None

            if name in result:
                if left_indexer is not None or right_indexer is not None:
                    if name in self.left:
                        if left_has_missing is None:
                            # computed lazily and cached across keys
                            left_has_missing = (
                                False
                                if left_indexer is None
                                else (left_indexer == -1).any()
                            )

                        if left_has_missing:
                            take_right = self.right_join_keys[i]

                            if result[name].dtype != self.left[name].dtype:
                                take_left = self.left[name]._values

                    elif name in self.right:
                        if right_has_missing is None:
                            right_has_missing = (
                                False
                                if right_indexer is None
                                else (right_indexer == -1).any()
                            )

                        if right_has_missing:
                            take_left = self.left_join_keys[i]

                            if result[name].dtype != self.right[name].dtype:
                                take_right = self.right[name]._values

            else:
                take_left = self.left_join_keys[i]
                take_right = self.right_join_keys[i]

            if take_left is not None or take_right is not None:
                if take_left is None:
                    lvals = result[name]._values
                elif left_indexer is None:
                    lvals = take_left
                else:
                    # TODO: can we pin down take_left's type earlier?
                    take_left = extract_array(take_left, extract_numpy=True)
                    lfill = na_value_for_dtype(take_left.dtype)
                    lvals = algos.take_nd(take_left, left_indexer, fill_value=lfill)

                if take_right is None:
                    rvals = result[name]._values
                elif right_indexer is None:
                    rvals = take_right
                else:
                    # TODO: can we pin down take_right's type earlier?
                    taker = extract_array(take_right, extract_numpy=True)
                    rfill = na_value_for_dtype(taker.dtype)
                    rvals = algos.take_nd(taker, right_indexer, fill_value=rfill)

                # if we have an all missing left_indexer
                # make sure to just use the right values or vice-versa
                if left_indexer is not None and (left_indexer == -1).all():
                    key_col = Index(rvals)
                    result_dtype = rvals.dtype
                elif right_indexer is not None and (right_indexer == -1).all():
                    key_col = Index(lvals)
                    result_dtype = lvals.dtype
                else:
                    # mix: take left values, patch the missing positions
                    # with the corresponding right values
                    key_col = Index(lvals)
                    if left_indexer is not None:
                        mask_left = left_indexer == -1
                        key_col = key_col.where(~mask_left, rvals)
                    result_dtype = find_common_type([lvals.dtype, rvals.dtype])
                    if (
                        lvals.dtype.kind == "M"
                        and rvals.dtype.kind == "M"
                        and result_dtype.kind == "O"
                    ):
                        # TODO(non-nano) Workaround for common_type not dealing
                        # with different resolutions
                        result_dtype = key_col.dtype

                if result._is_label_reference(name):
                    result[name] = result._constructor_sliced(
                        key_col, dtype=result_dtype, index=result.index
                    )
                elif result._is_level_reference(name):
                    if isinstance(result.index, MultiIndex):
                        key_col.name = name
                        idx_list = [
                            result.index.get_level_values(level_name)
                            if level_name != name
                            else key_col
                            for level_name in result.index.names
                        ]

                        result.set_index(idx_list, inplace=True)
                    else:
                        result.index = Index(key_col, name=name)
                else:
                    # unnamed array key: insert at the key's position
                    result.insert(i, name or f"key_{i}", key_col)

    def _get_join_indexers(
        self,
    ) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        """return the join indexers"""
        # make mypy happy
        assert self.how != "asof"
        return get_join_indexers(
            self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how
        )
    @final
    def _get_join_info(
        self,
    ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        # Compute the result index and the row indexers for each side.
        # Index-on-index joins are delegated to Index.join; the key-based
        # paths compute indexers first and then derive the result index.
        left_ax = self.left.index
        right_ax = self.right.index

        if self.left_index and self.right_index and self.how != "asof":
            join_index, left_indexer, right_indexer = left_ax.join(
                right_ax, how=self.how, return_indexers=True, sort=self.sort
            )

        elif self.right_index and self.how == "left":
            join_index, left_indexer, right_indexer = _left_join_on_index(
                left_ax, right_ax, self.left_join_keys, sort=self.sort
            )

        elif self.left_index and self.how == "right":
            # mirror image of the case above: swap the roles of the sides
            join_index, right_indexer, left_indexer = _left_join_on_index(
                right_ax, left_ax, self.right_join_keys, sort=self.sort
            )
        else:
            (left_indexer, right_indexer) = self._get_join_indexers()

            if self.right_index:
                if len(self.left) > 0:
                    join_index = self._create_join_index(
                        left_ax,
                        right_ax,
                        left_indexer,
                        how="right",
                    )
                elif right_indexer is None:
                    join_index = right_ax.copy()
                else:
                    join_index = right_ax.take(right_indexer)
            elif self.left_index:
                if self.how == "asof":
                    # GH#33463 asof should always behave like a left merge
                    join_index = self._create_join_index(
                        left_ax,
                        right_ax,
                        left_indexer,
                        how="left",
                    )

                elif len(self.right) > 0:
                    join_index = self._create_join_index(
                        right_ax,
                        left_ax,
                        right_indexer,
                        how="left",
                    )
                elif left_indexer is None:
                    join_index = left_ax.copy()
                else:
                    join_index = left_ax.take(left_indexer)
            else:
                # neither side joined on its index: plain RangeIndex result
                n = len(left_ax) if left_indexer is None else len(left_indexer)
                join_index = default_index(n)

        return join_index, left_indexer, right_indexer
    @final
    def _create_join_index(
        self,
        index: Index,
        other_index: Index,
        indexer: npt.NDArray[np.intp] | None,
        how: JoinHow = "left",
    ) -> Index:
        """
        Create a join index by rearranging one index to match another

        Parameters
        ----------
        index : Index
            index being rearranged
        other_index : Index
            used to supply values not found in index
        indexer : np.ndarray[np.intp] or None
            how to rearrange index
        how : str
            Replacement is only necessary if indexer based on other_index.

        Returns
        -------
        Index
        """
        if self.how in (how, "outer") and not isinstance(other_index, MultiIndex):
            # if final index requires values in other_index but not target
            # index, indexer may hold missing (-1) values, causing Index.take
            # to take the final value in target index. So, we set the last
            # element to be the desired fill value. We do not use allow_fill
            # and fill_value because it throws a ValueError on integer indices
            mask = indexer == -1
            if np.any(mask):
                fill_value = na_value_for_dtype(index.dtype, compat=False)
                index = index.append(Index([fill_value]))
        if indexer is None:
            return index.copy()
        return index.take(indexer)
This requires\n # a work-around as designated in the code below.\n # See _validate_left_right_on() for where this happens.\n\n # ugh, spaghetti re #733\n if _any(self.left_on) and _any(self.right_on):\n for lk, rk in zip(self.left_on, self.right_on):\n lk = extract_array(lk, extract_numpy=True)\n rk = extract_array(rk, extract_numpy=True)\n if is_lkey(lk):\n lk = cast(ArrayLike, lk)\n left_keys.append(lk)\n if is_rkey(rk):\n rk = cast(ArrayLike, rk)\n right_keys.append(rk)\n join_names.append(None) # what to do?\n else:\n # Then we're either Hashable or a wrong-length arraylike,\n # the latter of which will raise\n rk = cast(Hashable, rk)\n if rk is not None:\n right_keys.append(right._get_label_or_level_values(rk))\n join_names.append(rk)\n else:\n # work-around for merge_asof(right_index=True)\n right_keys.append(right.index._values)\n join_names.append(right.index.name)\n else:\n if not is_rkey(rk):\n # Then we're either Hashable or a wrong-length arraylike,\n # the latter of which will raise\n rk = cast(Hashable, rk)\n if rk is not None:\n right_keys.append(right._get_label_or_level_values(rk))\n else:\n # work-around for merge_asof(right_index=True)\n right_keys.append(right.index._values)\n if lk is not None and lk == rk: # FIXME: what about other NAs?\n right_drop.append(rk)\n else:\n rk = cast(ArrayLike, rk)\n right_keys.append(rk)\n if lk is not None:\n # Then we're either Hashable or a wrong-length arraylike,\n # the latter of which will raise\n lk = cast(Hashable, lk)\n left_keys.append(left._get_label_or_level_values(lk))\n join_names.append(lk)\n else:\n # work-around for merge_asof(left_index=True)\n left_keys.append(left.index._values)\n join_names.append(left.index.name)\n elif _any(self.left_on):\n for k in self.left_on:\n if is_lkey(k):\n k = extract_array(k, extract_numpy=True)\n k = cast(ArrayLike, k)\n left_keys.append(k)\n join_names.append(None)\n else:\n # Then we're either Hashable or a wrong-length arraylike,\n # the latter of which will 
raise\n k = cast(Hashable, k)\n left_keys.append(left._get_label_or_level_values(k))\n join_names.append(k)\n if isinstance(self.right.index, MultiIndex):\n right_keys = [\n lev._values.take(lev_codes)\n for lev, lev_codes in zip(\n self.right.index.levels, self.right.index.codes\n )\n ]\n else:\n right_keys = [self.right.index._values]\n elif _any(self.right_on):\n for k in self.right_on:\n k = extract_array(k, extract_numpy=True)\n if is_rkey(k):\n k = cast(ArrayLike, k)\n right_keys.append(k)\n join_names.append(None)\n else:\n # Then we're either Hashable or a wrong-length arraylike,\n # the latter of which will raise\n k = cast(Hashable, k)\n right_keys.append(right._get_label_or_level_values(k))\n join_names.append(k)\n if isinstance(self.left.index, MultiIndex):\n left_keys = [\n lev._values.take(lev_codes)\n for lev, lev_codes in zip(\n self.left.index.levels, self.left.index.codes\n )\n ]\n else:\n left_keys = [self.left.index._values]\n\n return left_keys, right_keys, join_names, left_drop, right_drop\n\n @final\n def _maybe_coerce_merge_keys(self) -> None:\n # we have valid merges but we may have to further\n # coerce these if they are originally incompatible types\n #\n # for example if these are categorical, but are not dtype_equal\n # or if we have object and integer dtypes\n\n for lk, rk, name in zip(\n self.left_join_keys, self.right_join_keys, self.join_names\n ):\n if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):\n continue\n\n lk = extract_array(lk, extract_numpy=True)\n rk = extract_array(rk, extract_numpy=True)\n\n lk_is_cat = isinstance(lk.dtype, CategoricalDtype)\n rk_is_cat = isinstance(rk.dtype, CategoricalDtype)\n lk_is_object_or_string = is_object_dtype(lk.dtype) or is_string_dtype(\n lk.dtype\n )\n rk_is_object_or_string = is_object_dtype(rk.dtype) or is_string_dtype(\n rk.dtype\n )\n\n # if either left or right is a categorical\n # then the must match exactly in categories & ordered\n if lk_is_cat and rk_is_cat:\n lk = 
cast(Categorical, lk)\n rk = cast(Categorical, rk)\n if lk._categories_match_up_to_permutation(rk):\n continue\n\n elif lk_is_cat or rk_is_cat:\n pass\n\n elif lk.dtype == rk.dtype:\n continue\n\n msg = (\n f"You are trying to merge on {lk.dtype} and {rk.dtype} columns "\n f"for key '{name}'. If you wish to proceed you should use pd.concat"\n )\n\n # if we are numeric, then allow differing\n # kinds to proceed, eg. int64 and int8, int and float\n # further if we are object, but we infer to\n # the same, then proceed\n if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype):\n if lk.dtype.kind == rk.dtype.kind:\n continue\n\n if isinstance(lk.dtype, ExtensionDtype) and not isinstance(\n rk.dtype, ExtensionDtype\n ):\n ct = find_common_type([lk.dtype, rk.dtype])\n if isinstance(ct, ExtensionDtype):\n com_cls = ct.construct_array_type()\n rk = com_cls._from_sequence(rk, dtype=ct, copy=False)\n else:\n rk = rk.astype(ct)\n elif isinstance(rk.dtype, ExtensionDtype):\n ct = find_common_type([lk.dtype, rk.dtype])\n if isinstance(ct, ExtensionDtype):\n com_cls = ct.construct_array_type()\n lk = com_cls._from_sequence(lk, dtype=ct, copy=False)\n else:\n lk = lk.astype(ct)\n\n # check whether ints and floats\n if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype):\n # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int\n with np.errstate(invalid="ignore"):\n # error: Argument 1 to "astype" of "ndarray" has incompatible\n # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected\n # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"\n casted = lk.astype(rk.dtype) # type: ignore[arg-type]\n\n mask = ~np.isnan(lk)\n match = lk == casted\n if not match[mask].all():\n warnings.warn(\n "You are merging on int and float "\n "columns where the float values "\n "are not equal to their int representation.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n continue\n\n if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):\n # GH 47391 numpy > 
1.24 will raise a RuntimeError for nan -> int\n with np.errstate(invalid="ignore"):\n # error: Argument 1 to "astype" of "ndarray" has incompatible\n # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected\n # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"\n casted = rk.astype(lk.dtype) # type: ignore[arg-type]\n\n mask = ~np.isnan(rk)\n match = rk == casted\n if not match[mask].all():\n warnings.warn(\n "You are merging on int and float "\n "columns where the float values "\n "are not equal to their int representation.",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n continue\n\n # let's infer and see if we are ok\n if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype(\n rk, skipna=False\n ):\n continue\n\n # Check if we are trying to merge on obviously\n # incompatible dtypes GH 9780, GH 15800\n\n # bool values are coerced to object\n elif (lk_is_object_or_string and is_bool_dtype(rk.dtype)) or (\n is_bool_dtype(lk.dtype) and rk_is_object_or_string\n ):\n pass\n\n # object values are allowed to be merged\n elif (lk_is_object_or_string and is_numeric_dtype(rk.dtype)) or (\n is_numeric_dtype(lk.dtype) and rk_is_object_or_string\n ):\n inferred_left = lib.infer_dtype(lk, skipna=False)\n inferred_right = lib.infer_dtype(rk, skipna=False)\n bool_types = ["integer", "mixed-integer", "boolean", "empty"]\n string_types = ["string", "unicode", "mixed", "bytes", "empty"]\n\n # inferred bool\n if inferred_left in bool_types and inferred_right in bool_types:\n pass\n\n # unless we are merging non-string-like with string-like\n elif (\n inferred_left in string_types and inferred_right not in string_types\n ) or (\n inferred_right in string_types and inferred_left not in string_types\n ):\n raise ValueError(msg)\n\n # datetimelikes must match exactly\n elif needs_i8_conversion(lk.dtype) and not needs_i8_conversion(rk.dtype):\n raise ValueError(msg)\n elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype):\n raise ValueError(msg)\n elif 
isinstance(lk.dtype, DatetimeTZDtype) and not isinstance(\n rk.dtype, DatetimeTZDtype\n ):\n raise ValueError(msg)\n elif not isinstance(lk.dtype, DatetimeTZDtype) and isinstance(\n rk.dtype, DatetimeTZDtype\n ):\n raise ValueError(msg)\n elif (\n isinstance(lk.dtype, DatetimeTZDtype)\n and isinstance(rk.dtype, DatetimeTZDtype)\n ) or (lk.dtype.kind == "M" and rk.dtype.kind == "M"):\n # allows datetime with different resolutions\n continue\n # datetime and timedelta not allowed\n elif lk.dtype.kind == "M" and rk.dtype.kind == "m":\n raise ValueError(msg)\n elif lk.dtype.kind == "m" and rk.dtype.kind == "M":\n raise ValueError(msg)\n\n elif is_object_dtype(lk.dtype) and is_object_dtype(rk.dtype):\n continue\n\n # Houston, we have a problem!\n # let's coerce to object if the dtypes aren't\n # categorical, otherwise coerce to the category\n # dtype. If we coerced categories to object,\n # then we would lose type information on some\n # columns, and end up trying to merge\n # incompatible dtypes. 
See GH 16900.\n if name in self.left.columns:\n typ = cast(Categorical, lk).categories.dtype if lk_is_cat else object\n self.left = self.left.copy()\n self.left[name] = self.left[name].astype(typ)\n if name in self.right.columns:\n typ = cast(Categorical, rk).categories.dtype if rk_is_cat else object\n self.right = self.right.copy()\n self.right[name] = self.right[name].astype(typ)\n\n def _validate_left_right_on(self, left_on, right_on):\n left_on = com.maybe_make_list(left_on)\n right_on = com.maybe_make_list(right_on)\n\n # Hm, any way to make this logic less complicated??\n if self.on is None and left_on is None and right_on is None:\n if self.left_index and self.right_index:\n left_on, right_on = (), ()\n elif self.left_index:\n raise MergeError("Must pass right_on or right_index=True")\n elif self.right_index:\n raise MergeError("Must pass left_on or left_index=True")\n else:\n # use the common columns\n left_cols = self.left.columns\n right_cols = self.right.columns\n common_cols = left_cols.intersection(right_cols)\n if len(common_cols) == 0:\n raise MergeError(\n "No common columns to perform merge on. 
"\n f"Merge options: left_on={left_on}, "\n f"right_on={right_on}, "\n f"left_index={self.left_index}, "\n f"right_index={self.right_index}"\n )\n if (\n not left_cols.join(common_cols, how="inner").is_unique\n or not right_cols.join(common_cols, how="inner").is_unique\n ):\n raise MergeError(f"Data columns not unique: {repr(common_cols)}")\n left_on = right_on = common_cols\n elif self.on is not None:\n if left_on is not None or right_on is not None:\n raise MergeError(\n 'Can only pass argument "on" OR "left_on" '\n 'and "right_on", not a combination of both.'\n )\n if self.left_index or self.right_index:\n raise MergeError(\n 'Can only pass argument "on" OR "left_index" '\n 'and "right_index", not a combination of both.'\n )\n left_on = right_on = self.on\n elif left_on is not None:\n if self.left_index:\n raise MergeError(\n 'Can only pass argument "left_on" OR "left_index" not both.'\n )\n if not self.right_index and right_on is None:\n raise MergeError('Must pass "right_on" OR "right_index".')\n n = len(left_on)\n if self.right_index:\n if len(left_on) != self.right.index.nlevels:\n raise ValueError(\n "len(left_on) must equal the number "\n 'of levels in the index of "right"'\n )\n right_on = [None] * n\n elif right_on is not None:\n if self.right_index:\n raise MergeError(\n 'Can only pass argument "right_on" OR "right_index" not both.'\n )\n if not self.left_index and left_on is None:\n raise MergeError('Must pass "left_on" OR "left_index".')\n n = len(right_on)\n if self.left_index:\n if len(right_on) != self.left.index.nlevels:\n raise ValueError(\n "len(right_on) must equal the number "\n 'of levels in the index of "left"'\n )\n left_on = [None] * n\n if len(right_on) != len(left_on):\n raise ValueError("len(right_on) must equal len(left_on)")\n\n return left_on, right_on\n\n @final\n def _validate_validate_kwd(self, validate: str) -> None:\n # Check uniqueness of each\n if self.left_index:\n left_unique = self.orig_left.index.is_unique\n else:\n 
left_unique = MultiIndex.from_arrays(self.left_join_keys).is_unique\n\n if self.right_index:\n right_unique = self.orig_right.index.is_unique\n else:\n right_unique = MultiIndex.from_arrays(self.right_join_keys).is_unique\n\n # Check data integrity\n if validate in ["one_to_one", "1:1"]:\n if not left_unique and not right_unique:\n raise MergeError(\n "Merge keys are not unique in either left "\n "or right dataset; not a one-to-one merge"\n )\n if not left_unique:\n raise MergeError(\n "Merge keys are not unique in left dataset; not a one-to-one merge"\n )\n if not right_unique:\n raise MergeError(\n "Merge keys are not unique in right dataset; not a one-to-one merge"\n )\n\n elif validate in ["one_to_many", "1:m"]:\n if not left_unique:\n raise MergeError(\n "Merge keys are not unique in left dataset; not a one-to-many merge"\n )\n\n elif validate in ["many_to_one", "m:1"]:\n if not right_unique:\n raise MergeError(\n "Merge keys are not unique in right dataset; "\n "not a many-to-one merge"\n )\n\n elif validate in ["many_to_many", "m:m"]:\n pass\n\n else:\n raise ValueError(\n f'"{validate}" is not a valid argument. 
'
                "Valid arguments are:\n"
                '- "1:1"\n'
                '- "1:m"\n'
                '- "m:1"\n'
                '- "m:m"\n'
                '- "one_to_one"\n'
                '- "one_to_many"\n'
                '- "many_to_one"\n'
                '- "many_to_many"'
            )


def get_join_indexers(
    left_keys: list[ArrayLike],
    right_keys: list[ArrayLike],
    sort: bool = False,
    how: JoinHow = "inner",
) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
    """
    Compute the indexers mapping each side's rows into the join result.

    Parameters
    ----------
    left_keys : list[ndarray, ExtensionArray, Index, Series]
    right_keys : list[ndarray, ExtensionArray, Index, Series]
    sort : bool, default False
    how : {'inner', 'outer', 'left', 'right'}, default 'inner'

    Returns
    -------
    np.ndarray[np.intp] or None
        Indexer into the left_keys.
    np.ndarray[np.intp] or None
        Indexer into the right_keys.
    """
    assert len(left_keys) == len(
        right_keys
    ), "left_keys and right_keys must be the same length"

    # fast-path for empty left/right: the result is either empty (the empty
    # side constrains the join) or, unsorted, a pass-through of the other side
    left_n = len(left_keys[0])
    right_n = len(right_keys[0])
    if left_n == 0:
        if how in ["left", "inner"]:
            return _get_empty_indexer()
        elif not sort and how in ["right", "outer"]:
            return _get_no_sort_one_missing_indexer(right_n, True)
    elif right_n == 0:
        if how in ["right", "inner"]:
            return _get_empty_indexer()
        elif not sort and how in ["left", "outer"]:
            return _get_no_sort_one_missing_indexer(left_n, False)

    lkey: ArrayLike
    rkey: ArrayLike
    if len(left_keys) > 1:
        # multiple keys: factorize each level, then collapse the label
        # arrays into single flat i8 keys per side
        # get left & right join labels and num. 
of levels at each location
        mapped = (
            _factorize_keys(left_keys[n], right_keys[n], sort=sort)
            for n in range(len(left_keys))
        )
        zipped = zip(*mapped)
        llab, rlab, shape = (list(x) for x in zipped)

        # get flat i8 keys from label lists
        lkey, rkey = _get_join_keys(llab, rlab, tuple(shape), sort)
    else:
        lkey = left_keys[0]
        rkey = right_keys[0]

    left = Index(lkey)
    right = Index(rkey)

    # fast path: monotonic keys with at least one unique side can use
    # Index.join directly; otherwise fall back to the hash-join path
    if (
        left.is_monotonic_increasing
        and right.is_monotonic_increasing
        and (left.is_unique or right.is_unique)
    ):
        _, lidx, ridx = left.join(right, how=how, return_indexers=True, sort=sort)
    else:
        lidx, ridx = get_join_indexers_non_unique(
            left._values, right._values, sort, how
        )

    # normalize an identity indexer (0..n-1) to None, meaning "no take needed"
    if lidx is not None and is_range_indexer(lidx, len(left)):
        lidx = None
    if ridx is not None and is_range_indexer(ridx, len(right)):
        ridx = None
    return lidx, ridx


def get_join_indexers_non_unique(
    left: ArrayLike,
    right: ArrayLike,
    sort: bool = False,
    how: JoinHow = "inner",
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    """
    Get join indexers for left and right.

    Parameters
    ----------
    left : ArrayLike
    right : ArrayLike
    sort : bool, default False
    how : {'inner', 'outer', 'left', 'right'}, default 'inner'

    Returns
    -------
    np.ndarray[np.intp]
        Indexer into left.
    np.ndarray[np.intp]
        Indexer into right.
    """
    # factorize both sides onto a shared set of integer codes, then
    # dispatch to the appropriate C-level join on those codes
    lkey, rkey, count = _factorize_keys(left, right, sort=sort)
    if how == "left":
        lidx, ridx = libjoin.left_outer_join(lkey, rkey, count, sort=sort)
    elif how == "right":
        # a right join is a left join with the operands (and results) swapped
        ridx, lidx = libjoin.left_outer_join(rkey, lkey, count, sort=sort)
    elif how == "inner":
        lidx, ridx = libjoin.inner_join(lkey, rkey, count, sort=sort)
    elif how == "outer":
        lidx, ridx = libjoin.full_outer_join(lkey, rkey, count)
    return lidx, ridx


def restore_dropped_levels_multijoin(
    left: MultiIndex,
    right: MultiIndex,
    dropped_level_names,
    join_index: Index,
    lindexer: npt.NDArray[np.intp],
    rindexer: 
npt.NDArray[np.intp],\n) -> tuple[FrozenList, FrozenList, FrozenList]:\n """\n *this is an internal non-public method*\n\n Returns the levels, labels and names of a multi-index to multi-index join.\n Depending on the type of join, this method restores the appropriate\n dropped levels of the joined multi-index.\n The method relies on lindexer, rindexer which hold the index positions of\n left and right, where a join was feasible\n\n Parameters\n ----------\n left : MultiIndex\n left index\n right : MultiIndex\n right index\n dropped_level_names : str array\n list of non-common level names\n join_index : Index\n the index of the join between the\n common levels of left and right\n lindexer : np.ndarray[np.intp]\n left indexer\n rindexer : np.ndarray[np.intp]\n right indexer\n\n Returns\n -------\n levels : list of Index\n levels of combined multiindexes\n labels : np.ndarray[np.intp]\n labels of combined multiindexes\n names : List[Hashable]\n names of combined multiindex levels\n\n """\n\n def _convert_to_multiindex(index: Index) -> MultiIndex:\n if isinstance(index, MultiIndex):\n return index\n else:\n return MultiIndex.from_arrays([index._values], names=[index.name])\n\n # For multi-multi joins with one overlapping level,\n # the returned index if of type Index\n # Assure that join_index is of type MultiIndex\n # so that dropped levels can be appended\n join_index = _convert_to_multiindex(join_index)\n\n join_levels = join_index.levels\n join_codes = join_index.codes\n join_names = join_index.names\n\n # Iterate through the levels that must be restored\n for dropped_level_name in dropped_level_names:\n if dropped_level_name in left.names:\n idx = left\n indexer = lindexer\n else:\n idx = right\n indexer = rindexer\n\n # The index of the level name to be restored\n name_idx = idx.names.index(dropped_level_name)\n\n restore_levels = idx.levels[name_idx]\n # Inject -1 in the codes list where a join was not possible\n # IOW indexer[i]=-1\n codes = 
idx.codes[name_idx]
        if indexer is None:
            # identity mapping: codes can be reused directly
            restore_codes = codes
        else:
            restore_codes = algos.take_nd(codes, indexer, fill_value=-1)

        # error: Cannot determine type of "__add__"
        join_levels = join_levels + [restore_levels]  # type: ignore[has-type]
        join_codes = join_codes + [restore_codes]  # type: ignore[has-type]
        join_names = join_names + [dropped_level_name]

    return join_levels, join_codes, join_names


class _OrderedMerge(_MergeOperation):
    """
    Merge operation for ordered data: always sorts the join keys and
    optionally forward-fills the join indexers (``fill_method="ffill"``).
    """

    _merge_type = "ordered_merge"

    def __init__(
        self,
        left: DataFrame | Series,
        right: DataFrame | Series,
        on: IndexLabel | None = None,
        left_on: IndexLabel | None = None,
        right_on: IndexLabel | None = None,
        left_index: bool = False,
        right_index: bool = False,
        suffixes: Suffixes = ("_x", "_y"),
        fill_method: str | None = None,
        how: JoinHow | Literal["asof"] = "outer",
    ) -> None:
        # fill_method is validated lazily in get_result, not here
        self.fill_method = fill_method
        _MergeOperation.__init__(
            self,
            left,
            right,
            on=on,
            left_on=left_on,
            left_index=left_index,
            right_index=right_index,
            right_on=right_on,
            how=how,
            suffixes=suffixes,
            sort=True,  # factorize sorts
        )

    def get_result(self, copy: bool | None = True) -> DataFrame:
        join_index, left_indexer, right_indexer = self._get_join_info()

        left_join_indexer: npt.NDArray[np.intp] | None
        right_join_indexer: npt.NDArray[np.intp] | None

        if self.fill_method == "ffill":
            # forward-fill the indexers themselves (None means identity,
            # nothing to fill) — see libjoin.ffill_indexer for semantics
            if left_indexer is None:
                left_join_indexer = None
            else:
                left_join_indexer = libjoin.ffill_indexer(left_indexer)
            if right_indexer is None:
                right_join_indexer = None
            else:
                right_join_indexer = libjoin.ffill_indexer(right_indexer)
        elif self.fill_method is None:
            left_join_indexer = left_indexer
            right_join_indexer = right_indexer
        else:
            raise ValueError("fill_method must be 'ffill' or None")

        result = self._reindex_and_concat(
            join_index, left_join_indexer, right_join_indexer, copy=copy
        )
        # note: key columns are added from the *unfilled* indexers
        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        return 
result\n\n\ndef _asof_by_function(direction: str):\n name = f"asof_join_{direction}_on_X_by_Y"\n return getattr(libjoin, name, None)\n\n\nclass _AsOfMerge(_OrderedMerge):\n _merge_type = "asof_merge"\n\n def __init__(\n self,\n left: DataFrame | Series,\n right: DataFrame | Series,\n on: IndexLabel | None = None,\n left_on: IndexLabel | None = None,\n right_on: IndexLabel | None = None,\n left_index: bool = False,\n right_index: bool = False,\n by=None,\n left_by=None,\n right_by=None,\n suffixes: Suffixes = ("_x", "_y"),\n how: Literal["asof"] = "asof",\n tolerance=None,\n allow_exact_matches: bool = True,\n direction: str = "backward",\n ) -> None:\n self.by = by\n self.left_by = left_by\n self.right_by = right_by\n self.tolerance = tolerance\n self.allow_exact_matches = allow_exact_matches\n self.direction = direction\n\n # check 'direction' is valid\n if self.direction not in ["backward", "forward", "nearest"]:\n raise MergeError(f"direction invalid: {self.direction}")\n\n # validate allow_exact_matches\n if not is_bool(self.allow_exact_matches):\n msg = (\n "allow_exact_matches must be boolean, "\n f"passed {self.allow_exact_matches}"\n )\n raise MergeError(msg)\n\n _OrderedMerge.__init__(\n self,\n left,\n right,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n how=how,\n suffixes=suffixes,\n fill_method=None,\n )\n\n def _validate_left_right_on(self, left_on, right_on):\n left_on, right_on = super()._validate_left_right_on(left_on, right_on)\n\n # we only allow on to be a single item for on\n if len(left_on) != 1 and not self.left_index:\n raise MergeError("can only asof on a key for left")\n\n if len(right_on) != 1 and not self.right_index:\n raise MergeError("can only asof on a key for right")\n\n if self.left_index and isinstance(self.left.index, MultiIndex):\n raise MergeError("left can only have one index")\n\n if self.right_index and isinstance(self.right.index, MultiIndex):\n raise 
MergeError("right can only have one index")\n\n # set 'by' columns\n if self.by is not None:\n if self.left_by is not None or self.right_by is not None:\n raise MergeError("Can only pass by OR left_by and right_by")\n self.left_by = self.right_by = self.by\n if self.left_by is None and self.right_by is not None:\n raise MergeError("missing left_by")\n if self.left_by is not None and self.right_by is None:\n raise MergeError("missing right_by")\n\n # GH#29130 Check that merge keys do not have dtype object\n if not self.left_index:\n left_on_0 = left_on[0]\n if isinstance(left_on_0, _known):\n lo_dtype = left_on_0.dtype\n else:\n lo_dtype = (\n self.left._get_label_or_level_values(left_on_0).dtype\n if left_on_0 in self.left.columns\n else self.left.index.get_level_values(left_on_0)\n )\n else:\n lo_dtype = self.left.index.dtype\n\n if not self.right_index:\n right_on_0 = right_on[0]\n if isinstance(right_on_0, _known):\n ro_dtype = right_on_0.dtype\n else:\n ro_dtype = (\n self.right._get_label_or_level_values(right_on_0).dtype\n if right_on_0 in self.right.columns\n else self.right.index.get_level_values(right_on_0)\n )\n else:\n ro_dtype = self.right.index.dtype\n\n if (\n is_object_dtype(lo_dtype)\n or is_object_dtype(ro_dtype)\n or is_string_dtype(lo_dtype)\n or is_string_dtype(ro_dtype)\n ):\n raise MergeError(\n f"Incompatible merge dtype, {repr(ro_dtype)} and "\n f"{repr(lo_dtype)}, both sides must have numeric dtype"\n )\n\n # add 'by' to our key-list so we can have it in the\n # output as a key\n if self.left_by is not None:\n if not is_list_like(self.left_by):\n self.left_by = [self.left_by]\n if not is_list_like(self.right_by):\n self.right_by = [self.right_by]\n\n if len(self.left_by) != len(self.right_by):\n raise MergeError("left_by and right_by must be the same length")\n\n left_on = self.left_by + list(left_on)\n right_on = self.right_by + list(right_on)\n\n return left_on, right_on\n\n def _maybe_require_matching_dtypes(\n self, left_join_keys: 
list[ArrayLike], right_join_keys: list[ArrayLike]\n ) -> None:\n # TODO: why do we do this for AsOfMerge but not the others?\n\n def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int):\n if left.dtype != right.dtype:\n if isinstance(left.dtype, CategoricalDtype) and isinstance(\n right.dtype, CategoricalDtype\n ):\n # The generic error message is confusing for categoricals.\n #\n # In this function, the join keys include both the original\n # ones of the merge_asof() call, and also the keys passed\n # to its by= argument. Unordered but equal categories\n # are not supported for the former, but will fail\n # later with a ValueError, so we don't *need* to check\n # for them here.\n msg = (\n f"incompatible merge keys [{i}] {repr(left.dtype)} and "\n f"{repr(right.dtype)}, both sides category, but not equal ones"\n )\n else:\n msg = (\n f"incompatible merge keys [{i}] {repr(left.dtype)} and "\n f"{repr(right.dtype)}, must be the same type"\n )\n raise MergeError(msg)\n\n # validate index types are the same\n for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):\n _check_dtype_match(lk, rk, i)\n\n if self.left_index:\n lt = self.left.index._values\n else:\n lt = left_join_keys[-1]\n\n if self.right_index:\n rt = self.right.index._values\n else:\n rt = right_join_keys[-1]\n\n _check_dtype_match(lt, rt, 0)\n\n def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None:\n # validate tolerance; datetime.timedelta or Timedelta if we have a DTI\n if self.tolerance is not None:\n if self.left_index:\n lt = self.left.index._values\n else:\n lt = left_join_keys[-1]\n\n msg = (\n f"incompatible tolerance {self.tolerance}, must be compat "\n f"with type {repr(lt.dtype)}"\n )\n\n if needs_i8_conversion(lt.dtype) or (\n isinstance(lt, ArrowExtensionArray) and lt.dtype.kind in "mM"\n ):\n if not isinstance(self.tolerance, datetime.timedelta):\n raise MergeError(msg)\n if self.tolerance < Timedelta(0):\n raise MergeError("tolerance must be 
positive")\n\n elif is_integer_dtype(lt.dtype):\n if not is_integer(self.tolerance):\n raise MergeError(msg)\n if self.tolerance < 0:\n raise MergeError("tolerance must be positive")\n\n elif is_float_dtype(lt.dtype):\n if not is_number(self.tolerance):\n raise MergeError(msg)\n # error: Unsupported operand types for > ("int" and "Number")\n if self.tolerance < 0: # type: ignore[operator]\n raise MergeError("tolerance must be positive")\n\n else:\n raise MergeError("key must be integer, timestamp or float")\n\n def _convert_values_for_libjoin(\n self, values: AnyArrayLike, side: str\n ) -> np.ndarray:\n # we require sortedness and non-null values in the join keys\n if not Index(values).is_monotonic_increasing:\n if isna(values).any():\n raise ValueError(f"Merge keys contain null values on {side} side")\n raise ValueError(f"{side} keys must be sorted")\n\n if isinstance(values, ArrowExtensionArray):\n values = values._maybe_convert_datelike_array()\n\n if needs_i8_conversion(values.dtype):\n values = values.view("i8")\n\n elif isinstance(values, BaseMaskedArray):\n # we've verified above that no nulls exist\n values = values._data\n elif isinstance(values, ExtensionArray):\n values = values.to_numpy()\n\n # error: Incompatible return value type (got "Union[ExtensionArray,\n # Any, ndarray[Any, Any], ndarray[Any, dtype[Any]], Index, Series]",\n # expected "ndarray[Any, Any]")\n return values # type: ignore[return-value]\n\n def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n """return the join indexers"""\n\n # values to compare\n left_values = (\n self.left.index._values if self.left_index else self.left_join_keys[-1]\n )\n right_values = (\n self.right.index._values if self.right_index else self.right_join_keys[-1]\n )\n\n # _maybe_require_matching_dtypes already checked for dtype matching\n assert left_values.dtype == right_values.dtype\n\n tolerance = self.tolerance\n if tolerance is not None:\n # TODO: can we reuse a 
tolerance-conversion function from\n # e.g. TimedeltaIndex?\n if needs_i8_conversion(left_values.dtype) or (\n isinstance(left_values, ArrowExtensionArray)\n and left_values.dtype.kind in "mM"\n ):\n tolerance = Timedelta(tolerance)\n # TODO: we have no test cases with PeriodDtype here; probably\n # need to adjust tolerance for that case.\n if left_values.dtype.kind in "mM":\n # Make sure the i8 representation for tolerance\n # matches that for left_values/right_values.\n if isinstance(left_values, ArrowExtensionArray):\n unit = left_values.dtype.pyarrow_dtype.unit\n else:\n unit = ensure_wrapped_if_datetimelike(left_values).unit\n tolerance = tolerance.as_unit(unit)\n\n tolerance = tolerance._value\n\n # initial type conversion as needed\n left_values = self._convert_values_for_libjoin(left_values, "left")\n right_values = self._convert_values_for_libjoin(right_values, "right")\n\n # a "by" parameter requires special handling\n if self.left_by is not None:\n # remove 'on' parameter from values if one existed\n if self.left_index and self.right_index:\n left_join_keys = self.left_join_keys\n right_join_keys = self.right_join_keys\n else:\n left_join_keys = self.left_join_keys[0:-1]\n right_join_keys = self.right_join_keys[0:-1]\n\n mapped = [\n _factorize_keys(\n left_join_keys[n],\n right_join_keys[n],\n sort=False,\n )\n for n in range(len(left_join_keys))\n ]\n\n if len(left_join_keys) == 1:\n left_by_values = mapped[0][0]\n right_by_values = mapped[0][1]\n else:\n arrs = [np.concatenate(m[:2]) for m in mapped]\n shape = tuple(m[2] for m in mapped)\n group_index = get_group_index(\n arrs, shape=shape, sort=False, xnull=False\n )\n left_len = len(left_join_keys[0])\n left_by_values = group_index[:left_len]\n right_by_values = group_index[left_len:]\n\n left_by_values = ensure_int64(left_by_values)\n right_by_values = ensure_int64(right_by_values)\n\n # choose appropriate function by type\n func = _asof_by_function(self.direction)\n return func(\n left_values,\n 
right_values,\n left_by_values,\n right_by_values,\n self.allow_exact_matches,\n tolerance,\n )\n else:\n # choose appropriate function by type\n func = _asof_by_function(self.direction)\n return func(\n left_values,\n right_values,\n None,\n None,\n self.allow_exact_matches,\n tolerance,\n False,\n )\n\n\ndef _get_multiindex_indexer(\n join_keys: list[ArrayLike], index: MultiIndex, sort: bool\n) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n # left & right join labels and num. of levels at each location\n mapped = (\n _factorize_keys(index.levels[n]._values, join_keys[n], sort=sort)\n for n in range(index.nlevels)\n )\n zipped = zip(*mapped)\n rcodes, lcodes, shape = (list(x) for x in zipped)\n if sort:\n rcodes = list(map(np.take, rcodes, index.codes))\n else:\n i8copy = lambda a: a.astype("i8", subok=False, copy=True)\n rcodes = list(map(i8copy, index.codes))\n\n # fix right labels if there were any nulls\n for i, join_key in enumerate(join_keys):\n mask = index.codes[i] == -1\n if mask.any():\n # check if there already was any nulls at this location\n # if there was, it is factorized to `shape[i] - 1`\n a = join_key[lcodes[i] == shape[i] - 1]\n if a.size == 0 or not a[0] != a[0]:\n shape[i] += 1\n\n rcodes[i][mask] = shape[i] - 1\n\n # get flat i8 join keys\n lkey, rkey = _get_join_keys(lcodes, rcodes, tuple(shape), sort)\n return lkey, rkey\n\n\ndef _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n """Return empty join indexers."""\n return (\n np.array([], dtype=np.intp),\n np.array([], dtype=np.intp),\n )\n\n\ndef _get_no_sort_one_missing_indexer(\n n: int, left_missing: bool\n) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n """\n Return join indexers where all of one side is selected without sorting\n and none of the other side is selected.\n\n Parameters\n ----------\n n : int\n Length of indexers to create.\n left_missing : bool\n If True, the left indexer will contain only -1's.\n If False, the right indexer 
def _left_join_on_index(
    left_ax: Index, right_ax: Index, join_keys: list[ArrayLike], sort: bool = False
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp]]:
    """
    Perform a left-outer join of ``left_ax`` against ``right_ax``.

    Parameters
    ----------
    left_ax : Index
        The calling (left) axis.
    right_ax : Index
        The axis being joined against; may be a MultiIndex.
    join_keys : list of ArrayLike
        Left-side key arrays, one per level of ``right_ax``.
    sort : bool, default False
        Whether to sort the resulting join index.

    Returns
    -------
    join_index : Index
        The joined index (``left_ax`` itself in the 1-to-1 unsorted case).
    left_indexer : np.ndarray[np.intp] or None
        None when ``left_ax`` is returned unchanged.
    right_indexer : np.ndarray[np.intp]
    """
    if isinstance(right_ax, MultiIndex):
        flat_left, flat_right = _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        flat_left = join_keys[0]  # type: ignore[assignment]
        flat_right = right_ax._values  # type: ignore[assignment]

    lcodes, rcodes, ngroups = _factorize_keys(flat_left, flat_right, sort=sort)
    left_indexer, right_indexer = libjoin.left_outer_join(
        lcodes, rcodes, ngroups, sort=sort
    )

    # fast path: no sorting requested and every left row matched at most one
    # right row, so the left frame keeps its index order and length
    if not sort and len(left_indexer) == len(left_ax):
        return left_ax, None, right_indexer

    # sorting requested, or 1-to-many matches: materialize the new index
    return left_ax.take(left_indexer), left_indexer, right_indexer
def _factorize_keys(
    lk: ArrayLike, rk: ArrayLike, sort: bool = True
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
    """
    Encode left and right keys as enumerated types.

    This is used to get the join indexers to be used when merging DataFrames.

    Parameters
    ----------
    lk : ndarray, ExtensionArray
        Left key.
    rk : ndarray, ExtensionArray
        Right key.
    sort : bool, defaults to True
        If True, the encoding is done such that the unique elements in the
        keys are sorted.

    Returns
    -------
    np.ndarray[np.intp]
        Left (resp. right if called with `key='right'`) labels, as enumerated type.
    np.ndarray[np.intp]
        Right (resp. left if called with `key='right'`) labels, as enumerated type.
    int
        Number of unique elements in union of left and right labels.

    See Also
    --------
    merge : Merge DataFrame or named Series objects
        with a database-style join.
    algorithms.factorize : Encode the object as an enumerated type
        or categorical variable.

    Examples
    --------
    >>> lk = np.array(["a", "c", "b"])
    >>> rk = np.array(["a", "c"])

    Here, the unique values are `'a', 'b', 'c'`. With the default
    `sort=True`, the encoding will be `{0: 'a', 1: 'b', 2: 'c'}`:

    >>> pd.core.reshape.merge._factorize_keys(lk, rk)
    (array([0, 2, 1]), array([0, 2]), 3)

    With the `sort=False`, the encoding will correspond to the order
    in which the unique elements first appear: `{0: 'a', 1: 'c', 2: 'b'}`:

    >>> pd.core.reshape.merge._factorize_keys(lk, rk, sort=False)
    (array([0, 1, 2]), array([0, 1]), 3)
    """
    # TODO: if either is a RangeIndex, we can likely factorize more efficiently?

    # --- dtype-specific pre-processing: reduce both keys to something the
    # --- hashtable factorizers below can handle ------------------------------

    if (
        isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype)
    ) or (lib.is_np_dtype(lk.dtype, "M") and lib.is_np_dtype(rk.dtype, "M")):
        # Extract the ndarray (UTC-localized) values
        # Note: we dont need the dtypes to match, as these can still be compared
        lk, rk = cast("DatetimeArray", lk)._ensure_matching_resos(rk)
        lk = cast("DatetimeArray", lk)._ndarray
        rk = cast("DatetimeArray", rk)._ndarray

    elif (
        isinstance(lk.dtype, CategoricalDtype)
        and isinstance(rk.dtype, CategoricalDtype)
        and lk.dtype == rk.dtype
    ):
        assert isinstance(lk, Categorical)
        assert isinstance(rk, Categorical)
        # Cast rk to encoding so we can compare codes with lk

        rk = lk._encode_with_my_categories(rk)

        # matching categoricals: factorize on the integer codes directly
        lk = ensure_int64(lk.codes)
        rk = ensure_int64(rk.codes)

    elif isinstance(lk, ExtensionArray) and lk.dtype == rk.dtype:
        if (isinstance(lk.dtype, ArrowDtype) and is_string_dtype(lk.dtype)) or (
            isinstance(lk.dtype, StringDtype) and lk.dtype.storage == "pyarrow"
        ):
            # pyarrow-backed strings: dictionary-encode both sides in one
            # chunked array so codes are shared, then split back apart
            import pyarrow as pa
            import pyarrow.compute as pc

            len_lk = len(lk)
            lk = lk._pa_array  # type: ignore[attr-defined]
            rk = rk._pa_array  # type: ignore[union-attr]
            dc = (
                pa.chunked_array(lk.chunks + rk.chunks)  # type: ignore[union-attr]
                .combine_chunks()
                .dictionary_encode()
            )

            llab, rlab, count = (
                pc.fill_null(dc.indices[slice(len_lk)], -1)
                .to_numpy()
                .astype(np.intp, copy=False),
                pc.fill_null(dc.indices[slice(len_lk, None)], -1)
                .to_numpy()
                .astype(np.intp, copy=False),
                len(dc.dictionary),
            )

            if sort:
                uniques = dc.dictionary.to_numpy(zero_copy_only=False)
                llab, rlab = _sort_labels(uniques, llab, rlab)

            if dc.null_count > 0:
                # move nulls (-1) into their own group at the end
                lmask = llab == -1
                lany = lmask.any()
                rmask = rlab == -1
                rany = rmask.any()
                if lany:
                    np.putmask(llab, lmask, count)
                if rany:
                    np.putmask(rlab, rmask, count)
                count += 1
            return llab, rlab, count

        if not isinstance(lk, BaseMaskedArray) and not (
            # exclude arrow dtypes that would get cast to object
            isinstance(lk.dtype, ArrowDtype)
            and (
                is_numeric_dtype(lk.dtype.numpy_dtype)
                or is_string_dtype(lk.dtype)
                and not sort
            )
        ):
            # generic EA fallback: factorize on the EA's backing values
            lk, _ = lk._values_for_factorize()

            # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute
            # "_values_for_factorize"
            rk, _ = rk._values_for_factorize()  # type: ignore[union-attr]

    if needs_i8_conversion(lk.dtype) and lk.dtype == rk.dtype:
        # GH#23917 TODO: Needs tests for non-matching dtypes
        # GH#23917 TODO: needs tests for case where lk is integer-dtype
        # and rk is datetime-dtype
        lk = np.asarray(lk, dtype=np.int64)
        rk = np.asarray(rk, dtype=np.int64)

    klass, lk, rk = _convert_arrays_and_get_rizer_klass(lk, rk)

    # single rizer so codes are shared across both keys
    rizer = klass(max(len(lk), len(rk)))

    if isinstance(lk, BaseMaskedArray):
        assert isinstance(rk, BaseMaskedArray)
        llab = rizer.factorize(lk._data, mask=lk._mask)
        rlab = rizer.factorize(rk._data, mask=rk._mask)
    elif isinstance(lk, ArrowExtensionArray):
        assert isinstance(rk, ArrowExtensionArray)
        # we can only get here with numeric dtypes
        # TODO: Remove when we have a Factorizer for Arrow
        # na_value=1 is an arbitrary placeholder; masked positions are
        # identified via the mask, not the filled value
        llab = rizer.factorize(
            lk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=lk.isna()
        )
        rlab = rizer.factorize(
            rk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=rk.isna()
        )
    else:
        # Argument 1 to "factorize" of "ObjectFactorizer" has incompatible type
        # "Union[ndarray[Any, dtype[signedinteger[_64Bit]]],
        # ndarray[Any, dtype[object_]]]"; expected "ndarray[Any, dtype[object_]]"
        llab = rizer.factorize(lk)  # type: ignore[arg-type]
        rlab = rizer.factorize(rk)  # type: ignore[arg-type]
    assert llab.dtype == np.dtype(np.intp), llab.dtype
    assert rlab.dtype == np.dtype(np.intp), rlab.dtype

    count = rizer.get_count()

    if sort:
        uniques = rizer.uniques.to_array()
        llab, rlab = _sort_labels(uniques, llab, rlab)

    # NA group
    # missing values come back as -1 from the factorizer; give them their own
    # group id at the end so NA matches NA during the join
    lmask = llab == -1
    lany = lmask.any()
    rmask = rlab == -1
    rany = rmask.any()

    if lany or rany:
        if lany:
            np.putmask(llab, lmask, count)
        if rany:
            np.putmask(rlab, rmask, count)
        count += 1

    return llab, rlab, count
def _get_join_keys(
    llab: list[npt.NDArray[np.int64 | np.intp]],
    rlab: list[npt.NDArray[np.int64 | np.intp]],
    shape: Shape,
    sort: bool,
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]:
    """
    Collapse per-level code arrays into single flat int64 join keys.

    Uses a mixed-radix encoding (like ``np.ravel_multi_index``) over as many
    levels as fit in int64 without overflow; if levels remain, the partial
    keys are densified via ``_factorize_keys`` and the function recurses on
    the remainder.

    Parameters
    ----------
    llab, rlab : list of int code arrays, one per level.
    shape : Shape
        Number of unique codes at each level (the radix per level).
    sort : bool
        Passed through to ``_factorize_keys`` when densifying.

    Returns
    -------
    tuple of flat int64 key arrays (left, right).
    """
    # how many levels can be done without overflow
    nlev = next(
        lev
        for lev in range(len(shape), 0, -1)
        if not is_int64_overflow_possible(shape[:lev])
    )

    # get keys for the first `nlev` levels
    # stride starts as the product of the remaining radices and shrinks by
    # one radix per level, so each level occupies its own "digit"
    stride = np.prod(shape[1:nlev], dtype="i8")
    lkey = stride * llab[0].astype("i8", subok=False, copy=False)
    rkey = stride * rlab[0].astype("i8", subok=False, copy=False)

    for i in range(1, nlev):
        with np.errstate(divide="ignore"):
            stride //= shape[i]
        lkey += llab[i] * stride
        rkey += rlab[i] * stride

    if nlev == len(shape):  # all done!
        return lkey, rkey

    # densify current keys to avoid overflow
    # re-factorizing compresses the partial keys back into a small code
    # range so the remaining levels can be folded in on the next pass
    lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)

    llab = [lkey] + llab[nlev:]
    rlab = [rkey] + rlab[nlev:]
    shape = (count,) + shape[nlev:]

    return _get_join_keys(llab, rlab, shape, sort)
isinstance(lname, str) or not isinstance(rname, str):\n return True\n return lname == rname\n\n\ndef _any(x) -> bool:\n return x is not None and com.any_not_none(*x)\n\n\ndef _validate_operand(obj: DataFrame | Series) -> DataFrame:\n if isinstance(obj, ABCDataFrame):\n return obj\n elif isinstance(obj, ABCSeries):\n if obj.name is None:\n raise ValueError("Cannot merge a Series without a name")\n return obj.to_frame()\n else:\n raise TypeError(\n f"Can only merge Series or DataFrame objects, a {type(obj)} was passed"\n )\n\n\ndef _items_overlap_with_suffix(\n left: Index, right: Index, suffixes: Suffixes\n) -> tuple[Index, Index]:\n """\n Suffixes type validation.\n\n If two indices overlap, add suffixes to overlapping entries.\n\n If corresponding suffix is empty, the entry is simply converted to string.\n\n """\n if not is_list_like(suffixes, allow_sets=False) or isinstance(suffixes, dict):\n raise TypeError(\n f"Passing 'suffixes' as a {type(suffixes)}, is not supported. "\n "Provide 'suffixes' as a tuple instead."\n )\n\n to_rename = left.intersection(right)\n if len(to_rename) == 0:\n return left, right\n\n lsuffix, rsuffix = suffixes\n\n if not lsuffix and not rsuffix:\n raise ValueError(f"columns overlap but no suffix specified: {to_rename}")\n\n def renamer(x, suffix: str | None):\n """\n Rename the left and right indices.\n\n If there is overlap, and suffix is not None, add\n suffix, otherwise, leave it as-is.\n\n Parameters\n ----------\n x : original column name\n suffix : str or None\n\n Returns\n -------\n x : renamed column name\n """\n if x in to_rename and suffix is not None:\n return f"{x}{suffix}"\n return x\n\n lrenamer = partial(renamer, suffix=lsuffix)\n rrenamer = partial(renamer, suffix=rsuffix)\n\n llabels = left._transform_index(lrenamer)\n rlabels = right._transform_index(rrenamer)\n\n dups = []\n if not llabels.is_unique:\n # Only warn when duplicates are caused because of suffixes, already duplicated\n # columns in origin should not 
warn\n dups = llabels[(llabels.duplicated()) & (~left.duplicated())].tolist()\n if not rlabels.is_unique:\n dups.extend(rlabels[(rlabels.duplicated()) & (~right.duplicated())].tolist())\n if dups:\n raise MergeError(\n f"Passing 'suffixes' which cause duplicate columns {set(dups)} is "\n f"not allowed.",\n )\n\n return llabels, rlabels\n | .venv\Lib\site-packages\pandas\core\reshape\merge.py | merge.py | Python | 99,554 | 0.75 | 0.131112 | 0.082328 | node-utils | 68 | 2024-05-01T20:14:32.346811 | BSD-3-Clause | false | 310c257f26b23d27a0ee68d42f311a06 |
from __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Sequence,\n)\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Literal,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.cast import maybe_downcast_to_dtype\nfrom pandas.core.dtypes.common import (\n is_list_like,\n is_nested_list_like,\n is_scalar,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\n\nimport pandas.core.common as com\nfrom pandas.core.frame import _shared_docs\nfrom pandas.core.groupby import Grouper\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n get_objs_combined_axis,\n)\nfrom pandas.core.reshape.concat import concat\nfrom pandas.core.reshape.util import cartesian_product\nfrom pandas.core.series import Series\n\nif TYPE_CHECKING:\n from pandas._typing import (\n AggFuncType,\n AggFuncTypeBase,\n AggFuncTypeDict,\n IndexLabel,\n )\n\n from pandas import DataFrame\n\n\n# Note: We need to make sure `frame` is imported before `pivot`, otherwise\n# _shared_docs['pivot_table'] will not yet exist. 
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot_table"], indents=1)
def pivot_table(
    data: DataFrame,
    values=None,
    index=None,
    columns=None,
    aggfunc: AggFuncType = "mean",
    fill_value=None,
    margins: bool = False,
    dropna: bool = True,
    margins_name: Hashable = "All",
    observed: bool | lib.NoDefault = lib.no_default,
    sort: bool = True,
) -> DataFrame:
    # Normalize the grouping specifications to lists up front.
    index = _convert_by(index)
    columns = _convert_by(columns)

    if isinstance(aggfunc, list):
        # One sub-table per aggregation function, concatenated along the
        # columns with the function names forming the outermost column level.
        pieces: list[DataFrame] = []
        keys = []
        for func in aggfunc:
            pieces.append(
                __internal_pivot_table(
                    data,
                    values=values,
                    index=index,
                    columns=columns,
                    fill_value=fill_value,
                    aggfunc=func,
                    margins=margins,
                    dropna=dropna,
                    margins_name=margins_name,
                    observed=observed,
                    sort=sort,
                )
            )
            keys.append(getattr(func, "__name__", func))
        table = concat(pieces, keys=keys, axis=1)
    else:
        # Single aggregation function: delegate directly.
        table = __internal_pivot_table(
            data,
            values,
            index,
            columns,
            aggfunc,
            fill_value,
            margins,
            dropna,
            margins_name,
            observed,
            sort,
        )

    return table.__finalize__(data, method="pivot_table")
def __internal_pivot_table(
    data: DataFrame,
    values,
    index,
    columns,
    aggfunc: AggFuncTypeBase | AggFuncTypeDict,
    fill_value,
    margins: bool,
    dropna: bool,
    margins_name: Hashable,
    observed: bool | lib.NoDefault,
    sort: bool,
) -> DataFrame:
    """
    Helper of :func:`pandas.pivot_table` for any non-list ``aggfunc``.
    """
    keys = index + columns

    values_passed = values is not None
    if values_passed:
        # Explicit value columns: validate them and trim ``data`` down to
        # just the key + value columns.
        if is_list_like(values):
            values_multi = True
            values = list(values)
        else:
            values_multi = False
            values = [values]

        # GH14938 Make sure value labels are in data
        for i in values:
            if i not in data:
                raise KeyError(i)

        to_filter = []
        for x in keys + values:
            if isinstance(x, Grouper):
                x = x.key
            try:
                # ``x in data`` can raise TypeError for unhashable specs
                # (e.g. array-like groupers) — those are simply not columns
                if x in data:
                    to_filter.append(x)
            except TypeError:
                pass
        if len(to_filter) < len(data.columns):
            data = data[to_filter]

    else:
        # No values given: aggregate every non-key column.
        values = data.columns
        for key in keys:
            try:
                values = values.drop(key)
            except (TypeError, ValueError, KeyError):
                pass
        values = list(values)

    observed_bool = False if observed is lib.no_default else observed
    grouped = data.groupby(keys, observed=observed_bool, sort=sort, dropna=dropna)
    if observed is lib.no_default and any(
        ping._passed_categorical for ping in grouped._grouper.groupings
    ):
        # Deprecation: default observed=False will flip to True (GH-driven);
        # only warn when a categorical grouper is actually involved.
        warnings.warn(
            "The default value of observed=False is deprecated and will change "
            "to observed=True in a future version of pandas. Specify "
            "observed=False to silence this warning and retain the current behavior",
            category=FutureWarning,
            stacklevel=find_stack_level(),
        )
    agged = grouped.agg(aggfunc)

    if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
        agged = agged.dropna(how="all")

    table = agged

    # GH17038, this check should only happen if index is defined (not None)
    if table.index.nlevels > 1 and index:
        # Related GH #17123
        # If index_names are integers, determine whether the integers refer
        # to the level position or name.
        index_names = agged.index.names[: len(index)]
        to_unstack = []
        for i in range(len(index), len(keys)):
            name = agged.index.names[i]
            if name is None or name in index_names:
                to_unstack.append(i)
            else:
                to_unstack.append(name)
        table = agged.unstack(to_unstack, fill_value=fill_value)

    if not dropna:
        # Reindex against the full cartesian product of the level values so
        # empty groups appear (filled with ``fill_value``).
        if isinstance(table.index, MultiIndex):
            m = MultiIndex.from_arrays(
                cartesian_product(table.index.levels), names=table.index.names
            )
            table = table.reindex(m, axis=0, fill_value=fill_value)

        if isinstance(table.columns, MultiIndex):
            m = MultiIndex.from_arrays(
                cartesian_product(table.columns.levels), names=table.columns.names
            )
            table = table.reindex(m, axis=1, fill_value=fill_value)

    if sort is True and isinstance(table, ABCDataFrame):
        table = table.sort_index(axis=1)

    if fill_value is not None:
        table = table.fillna(fill_value)
        if aggfunc is len and not observed and lib.is_integer(fill_value):
            # TODO: can we avoid this? this used to be handled by
            # downcast="infer" in fillna
            table = table.astype(np.int64)

    if margins:
        if dropna:
            data = data[data.notna().all(axis=1)]
        table = _add_margins(
            table,
            data,
            values,
            rows=index,
            cols=columns,
            aggfunc=aggfunc,
            observed=dropna,
            margins_name=margins_name,
            fill_value=fill_value,
        )

    # discard the top level
    if values_passed and not values_multi and table.columns.nlevels > 1:
        table.columns = table.columns.droplevel(0)
    if len(index) == 0 and len(columns) > 0:
        table = table.T

    # GH 15193 Make sure empty columns are removed if dropna=True
    if isinstance(table, ABCDataFrame) and dropna:
        table = table.dropna(how="all", axis=1)

    return table
def _add_margins(
    table: DataFrame | Series,
    data: DataFrame,
    values,
    rows,
    cols,
    aggfunc,
    observed: bool,
    margins_name: Hashable = "All",
    fill_value=None,
):
    """
    Append the margin (subtotal) row/columns named ``margins_name`` to a
    pivoted ``table``, computed by re-aggregating the original ``data``.
    """
    if not isinstance(margins_name, str):
        raise ValueError("margins_name argument must be a string")

    # the margin label must not collide with an existing index/column label
    msg = f'Conflicting name "{margins_name}" in margins'
    for level in table.index.names:
        if margins_name in table.index.get_level_values(level):
            raise ValueError(msg)

    grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)

    if table.ndim == 2:
        # i.e. DataFrame
        for level in table.columns.names[1:]:
            if margins_name in table.columns.get_level_values(level):
                raise ValueError(msg)

    # the margin row's index key: pad with "" for extra index levels
    key: str | tuple[str, ...]
    if len(rows) > 1:
        key = (margins_name,) + ("",) * (len(rows) - 1)
    else:
        key = margins_name

    if not values and isinstance(table, ABCSeries):
        # If there are no values and the table is a series, then there is only
        # one column in the data. Compute grand margin and return it.
        return table._append(table._constructor({key: grand_margin[margins_name]}))

    elif values:
        marginal_result_set = _generate_marginal_results(
            table, data, values, rows, cols, aggfunc, observed, margins_name
        )
        # helper may short-circuit and return the table unchanged (GH 49240)
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set
    else:
        # no values, and table is a DataFrame
        assert isinstance(table, ABCDataFrame)
        marginal_result_set = _generate_marginal_results_without_values(
            table, data, rows, cols, aggfunc, observed, margins_name
        )
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set

    row_margin = row_margin.reindex(result.columns, fill_value=fill_value)
    # populate grand margin
    for k in margin_keys:
        if isinstance(k, str):
            row_margin[k] = grand_margin[k]
        else:
            row_margin[k] = grand_margin[k[0]]

    from pandas import DataFrame

    margin_dummy = DataFrame(row_margin, columns=Index([key])).T

    row_names = result.index.names
    # check the result column and leave floats

    for dtype in set(result.dtypes):
        if isinstance(dtype, ExtensionDtype):
            # Can hold NA already
            continue

        # try to restore the original column dtypes on the margin row
        cols = result.select_dtypes([dtype]).columns
        margin_dummy[cols] = margin_dummy[cols].apply(
            maybe_downcast_to_dtype, args=(dtype,)
        )
    result = result._append(margin_dummy)
    result.index.names = row_names

    return result
def _generate_marginal_results(
    table,
    data: DataFrame,
    values,
    rows,
    cols,
    aggfunc,
    observed: bool,
    margins_name: Hashable = "All",
):
    """
    Compute the per-group column margins and the bottom row margin for a
    pivot table built with explicit ``values``.

    Returns either the table unchanged (when there is nothing to do, GH 49240)
    or a ``(result, margin_keys, row_margin)`` tuple consumed by
    ``_add_margins``.
    """
    margin_keys: list | Index
    if len(cols) > 0:
        # need to "interleave" the margins
        table_pieces = []
        margin_keys = []

        def _all_key(key):
            # column key for a group's margin: pad with "" for extra levels
            return (key, margins_name) + ("",) * (len(cols) - 1)

        if len(rows) > 0:
            margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc)
            cat_axis = 1

            # append each value-group's margin column next to its block
            for key, piece in table.T.groupby(level=0, observed=observed):
                piece = piece.T
                all_key = _all_key(key)

                # we are going to mutate this, so need to copy!
                piece = piece.copy()
                piece[all_key] = margin[key]

                table_pieces.append(piece)
                margin_keys.append(all_key)
        else:
            from pandas import DataFrame

            cat_axis = 0
            for key, piece in table.groupby(level=0, observed=observed):
                if len(cols) > 1:
                    all_key = _all_key(key)
                else:
                    all_key = margins_name
                table_pieces.append(piece)
                # GH31016 this is to calculate margin for each group, and assign
                # corresponded key as index
                transformed_piece = DataFrame(piece.apply(aggfunc)).T
                if isinstance(piece.index, MultiIndex):
                    # We are adding an empty level
                    transformed_piece.index = MultiIndex.from_tuples(
                        [all_key], names=piece.index.names + [None]
                    )
                else:
                    transformed_piece.index = Index([all_key], name=piece.index.name)

                # append piece for margin into table_piece
                table_pieces.append(transformed_piece)
                margin_keys.append(all_key)

        if not table_pieces:
            # GH 49240
            return table
        else:
            result = concat(table_pieces, axis=cat_axis)

        if len(rows) == 0:
            return result
    else:
        result = table
        margin_keys = table.columns

    if len(cols) > 0:
        # bottom row margin: aggregate over the column keys
        row_margin = data[cols + values].groupby(cols, observed=observed).agg(aggfunc)
        row_margin = row_margin.stack(future_stack=True)

        # GH#26568. Use names instead of indices in case of numeric names
        new_order_indices = [len(cols)] + list(range(len(cols)))
        new_order_names = [row_margin.index.names[i] for i in new_order_indices]
        row_margin.index = row_margin.index.reorder_levels(new_order_names)
    else:
        row_margin = data._constructor_sliced(np.nan, index=result.columns)

    return result, margin_keys, row_margin
Use names instead of indices in case of numeric names\n new_order_indices = [len(cols)] + list(range(len(cols)))\n new_order_names = [row_margin.index.names[i] for i in new_order_indices]\n row_margin.index = row_margin.index.reorder_levels(new_order_names)\n else:\n row_margin = data._constructor_sliced(np.nan, index=result.columns)\n\n return result, margin_keys, row_margin\n\n\ndef _generate_marginal_results_without_values(\n table: DataFrame,\n data: DataFrame,\n rows,\n cols,\n aggfunc,\n observed: bool,\n margins_name: Hashable = "All",\n):\n margin_keys: list | Index\n if len(cols) > 0:\n # need to "interleave" the margins\n margin_keys = []\n\n def _all_key():\n if len(cols) == 1:\n return margins_name\n return (margins_name,) + ("",) * (len(cols) - 1)\n\n if len(rows) > 0:\n margin = data.groupby(rows, observed=observed)[rows].apply(aggfunc)\n all_key = _all_key()\n table[all_key] = margin\n result = table\n margin_keys.append(all_key)\n\n else:\n margin = data.groupby(level=0, axis=0, observed=observed).apply(aggfunc)\n all_key = _all_key()\n table[all_key] = margin\n result = table\n margin_keys.append(all_key)\n return result\n else:\n result = table\n margin_keys = table.columns\n\n if len(cols):\n row_margin = data.groupby(cols, observed=observed)[cols].apply(aggfunc)\n else:\n row_margin = Series(np.nan, index=result.columns)\n\n return result, margin_keys, row_margin\n\n\ndef _convert_by(by):\n if by is None:\n by = []\n elif (\n is_scalar(by)\n or isinstance(by, (np.ndarray, Index, ABCSeries, Grouper))\n or callable(by)\n ):\n by = [by]\n else:\n by = list(by)\n return by\n\n\n@Substitution("\ndata : DataFrame")\n@Appender(_shared_docs["pivot"], indents=1)\ndef pivot(\n data: DataFrame,\n *,\n columns: IndexLabel,\n index: IndexLabel | lib.NoDefault = lib.no_default,\n values: IndexLabel | lib.NoDefault = lib.no_default,\n) -> DataFrame:\n columns_listlike = com.convert_to_list_like(columns)\n\n # If columns is None we will create a MultiIndex 
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot"], indents=1)
def pivot(
    data: DataFrame,
    *,
    columns: IndexLabel,
    index: IndexLabel | lib.NoDefault = lib.no_default,
    values: IndexLabel | lib.NoDefault = lib.no_default,
) -> DataFrame:
    columns_listlike = com.convert_to_list_like(columns)

    # If columns is None we will create a MultiIndex level with None as name
    # which might cause duplicated names because None is the default for
    # level names
    # shallow copy so the sentinel renaming below never touches the caller's
    # frame
    data = data.copy(deep=False)
    data.index = data.index.copy()
    data.index.names = [
        name if name is not None else lib.no_default for name in data.index.names
    ]

    indexed: DataFrame | Series
    if values is lib.no_default:
        # no values: keep all remaining columns; set_index + unstack does
        # the reshape
        if index is not lib.no_default:
            cols = com.convert_to_list_like(index)
        else:
            cols = []

        append = index is lib.no_default
        # error: Unsupported operand types for + ("List[Any]" and "ExtensionArray")
        # error: Unsupported left operand type for + ("ExtensionArray")
        indexed = data.set_index(
            cols + columns_listlike, append=append  # type: ignore[operator]
        )
    else:
        # explicit values: build the target MultiIndex by hand from the
        # index spec (or the existing index) plus the column keys
        index_list: list[Index] | list[Series]
        if index is lib.no_default:
            if isinstance(data.index, MultiIndex):
                # GH 23955
                index_list = [
                    data.index.get_level_values(i) for i in range(data.index.nlevels)
                ]
            else:
                index_list = [
                    data._constructor_sliced(data.index, name=data.index.name)
                ]
        else:
            index_list = [data[idx] for idx in com.convert_to_list_like(index)]

        data_columns = [data[col] for col in columns_listlike]
        index_list.extend(data_columns)
        multiindex = MultiIndex.from_arrays(index_list)

        if is_list_like(values) and not isinstance(values, tuple):
            # Exclude tuple because it is seen as a single column name
            values = cast(Sequence[Hashable], values)
            indexed = data._constructor(
                data[values]._values, index=multiindex, columns=values
            )
        else:
            indexed = data._constructor_sliced(data[values]._values, index=multiindex)
    # error: Argument 1 to "unstack" of "DataFrame" has incompatible type "Union
    # [List[Any], ExtensionArray, ndarray[Any, Any], Index, Series]"; expected
    # "Hashable"
    result = indexed.unstack(columns_listlike)  # type: ignore[arg-type]
    # undo the no_default sentinel renaming applied above
    result.index.names = [
        name if name is not lib.no_default else None for name in result.index.names
    ]

    return result
def crosstab(
    index,
    columns,
    values=None,
    rownames=None,
    colnames=None,
    aggfunc=None,
    margins: bool = False,
    margins_name: Hashable = "All",
    dropna: bool = True,
    normalize: bool | Literal[0, 1, "all", "index", "columns"] = False,
) -> DataFrame:
    """
    Compute a simple cross tabulation of two (or more) factors.

    By default, computes a frequency table of the factors unless an
    array of values and an aggregation function are passed.

    Parameters
    ----------
    index : array-like, Series, or list of arrays/Series
        Values to group by in the rows.
    columns : array-like, Series, or list of arrays/Series
        Values to group by in the columns.
    values : array-like, optional
        Array of values to aggregate according to the factors.
        Requires `aggfunc` be specified.
    rownames : sequence, default None
        If passed, must match number of row arrays passed.
    colnames : sequence, default None
        If passed, must match number of column arrays passed.
    aggfunc : function, optional
        If specified, requires `values` be specified as well.
    margins : bool, default False
        Add row/column margins (subtotals).
    margins_name : str, default 'All'
        Name of the row/column that will contain the totals
        when margins is True.
    dropna : bool, default True
        Do not include columns whose entries are all NaN.
    normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
        Normalize by dividing all values by the sum of values.

        - If passed 'all' or `True`, will normalize over all values.
        - If passed 'index' will normalize over each row.
        - If passed 'columns' will normalize over each column.
        - If margins is `True`, will also normalize margin values.

    Returns
    -------
    DataFrame
        Cross tabulation of the data.

    See Also
    --------
    DataFrame.pivot : Reshape data based on column values.
    pivot_table : Create a pivot table as a DataFrame.

    Notes
    -----
    Any Series passed will have their name attributes used unless row or column
    names for the cross-tabulation are specified.

    Any input passed containing Categorical data will have **all** of its
    categories included in the cross-tabulation, even if the actual data does
    not contain any instances of a particular category.

    In the event that there aren't overlapping indexes an empty DataFrame will
    be returned.

    Reference :ref:`the user guide <reshaping.crosstabulations>` for more examples.

    Examples
    --------
    >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
    ...               "bar", "bar", "foo", "foo", "foo"], dtype=object)
    >>> b = np.array(["one", "one", "one", "two", "one", "one",
    ...               "one", "two", "two", "two", "one"], dtype=object)
    >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
    ...               "shiny", "dull", "shiny", "shiny", "shiny"],
    ...              dtype=object)
    >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
    b   one        two
    c   dull shiny dull shiny
    a
    bar    1     2    1     0
    foo    2     2    1     2

    Here 'c' and 'f' are not represented in the data and will not be
    shown in the output because dropna is True by default. Set
    dropna=False to preserve categories with no data.

    >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
    >>> pd.crosstab(foo, bar)
    col_0  d  e
    row_0
    a      1  0
    b      0  1
    >>> pd.crosstab(foo, bar, dropna=False)
    col_0  d  e  f
    row_0
    a      1  0  0
    b      0  1  0
    c      0  0  0
    """
    # values and aggfunc must be supplied together
    if values is None and aggfunc is not None:
        raise ValueError("aggfunc cannot be used without values.")

    if values is not None and aggfunc is None:
        raise ValueError("values cannot be used without an aggfunc.")

    # normalize single arrays to one-element lists of groupers
    if not is_nested_list_like(index):
        index = [index]
    if not is_nested_list_like(columns):
        columns = [columns]

    # align any Series/DataFrame inputs on their shared index
    common_idx = None
    pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))]
    if pass_objs:
        common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False)

    rownames = _get_names(index, rownames, prefix="row")
    colnames = _get_names(columns, colnames, prefix="col")

    # duplicate names mapped to unique names for pivot op
    (
        rownames_mapper,
        unique_rownames,
        colnames_mapper,
        unique_colnames,
    ) = _build_names_mapper(rownames, colnames)

    from pandas import DataFrame

    # assemble a frame and delegate the actual tabulation to pivot_table
    data = {
        **dict(zip(unique_rownames, index)),
        **dict(zip(unique_colnames, columns)),
    }
    df = DataFrame(data, index=common_idx)

    if values is None:
        # frequency table: count rows via len over a dummy column
        df["__dummy__"] = 0
        kwargs = {"aggfunc": len, "fill_value": 0}
    else:
        df["__dummy__"] = values
        kwargs = {"aggfunc": aggfunc}

    # error: Argument 7 to "pivot_table" of "DataFrame" has incompatible type
    # "**Dict[str, object]"; expected "Union[...]"
    table = df.pivot_table(
        "__dummy__",
        index=unique_rownames,
        columns=unique_colnames,
        margins=margins,
        margins_name=margins_name,
        dropna=dropna,
        observed=False,
        **kwargs,  # type: ignore[arg-type]
    )

    # Post-process
    if normalize is not False:
        table = _normalize(
            table, normalize=normalize, margins=margins, margins_name=margins_name
        )

    # restore the user-visible (possibly duplicated) axis names
    table = table.rename_axis(index=rownames_mapper, axis=0)
    table = table.rename_axis(columns=colnames_mapper, axis=1)

    return table
margins_name=margins_name\n )\n\n table = table.rename_axis(index=rownames_mapper, axis=0)\n table = table.rename_axis(columns=colnames_mapper, axis=1)\n\n return table\n\n\ndef _normalize(\n table: DataFrame, normalize, margins: bool, margins_name: Hashable = "All"\n) -> DataFrame:\n if not isinstance(normalize, (bool, str)):\n axis_subs = {0: "index", 1: "columns"}\n try:\n normalize = axis_subs[normalize]\n except KeyError as err:\n raise ValueError("Not a valid normalize argument") from err\n\n if margins is False:\n # Actual Normalizations\n normalizers: dict[bool | str, Callable] = {\n "all": lambda x: x / x.sum(axis=1).sum(axis=0),\n "columns": lambda x: x / x.sum(),\n "index": lambda x: x.div(x.sum(axis=1), axis=0),\n }\n\n normalizers[True] = normalizers["all"]\n\n try:\n f = normalizers[normalize]\n except KeyError as err:\n raise ValueError("Not a valid normalize argument") from err\n\n table = f(table)\n table = table.fillna(0)\n\n elif margins is True:\n # keep index and column of pivoted table\n table_index = table.index\n table_columns = table.columns\n last_ind_or_col = table.iloc[-1, :].name\n\n # check if margin name is not in (for MI cases) and not equal to last\n # index/column and save the column and index margin\n if (margins_name not in last_ind_or_col) & (margins_name != last_ind_or_col):\n raise ValueError(f"{margins_name} not in pivoted DataFrame")\n column_margin = table.iloc[:-1, -1]\n index_margin = table.iloc[-1, :-1]\n\n # keep the core table\n table = table.iloc[:-1, :-1]\n\n # Normalize core\n table = _normalize(table, normalize=normalize, margins=False)\n\n # Fix Margins\n if normalize == "columns":\n column_margin = column_margin / column_margin.sum()\n table = concat([table, column_margin], axis=1)\n table = table.fillna(0)\n table.columns = table_columns\n\n elif normalize == "index":\n index_margin = index_margin / index_margin.sum()\n table = table._append(index_margin)\n table = table.fillna(0)\n table.index = table_index\n\n 
elif normalize == "all" or normalize is True:\n column_margin = column_margin / column_margin.sum()\n index_margin = index_margin / index_margin.sum()\n index_margin.loc[margins_name] = 1\n table = concat([table, column_margin], axis=1)\n table = table._append(index_margin)\n\n table = table.fillna(0)\n table.index = table_index\n table.columns = table_columns\n\n else:\n raise ValueError("Not a valid normalize argument")\n\n else:\n raise ValueError("Not a valid margins argument")\n\n return table\n\n\ndef _get_names(arrs, names, prefix: str = "row"):\n if names is None:\n names = []\n for i, arr in enumerate(arrs):\n if isinstance(arr, ABCSeries) and arr.name is not None:\n names.append(arr.name)\n else:\n names.append(f"{prefix}_{i}")\n else:\n if len(names) != len(arrs):\n raise AssertionError("arrays and names must have the same length")\n if not isinstance(names, list):\n names = list(names)\n\n return names\n\n\ndef _build_names_mapper(\n rownames: list[str], colnames: list[str]\n) -> tuple[dict[str, str], list[str], dict[str, str], list[str]]:\n """\n Given the names of a DataFrame's rows and columns, returns a set of unique row\n and column names and mappers that convert to original names.\n\n A row or column name is replaced if it is duplicate among the rows of the inputs,\n among the columns of the inputs or between the rows and the columns.\n\n Parameters\n ----------\n rownames: list[str]\n colnames: list[str]\n\n Returns\n -------\n Tuple(Dict[str, str], List[str], Dict[str, str], List[str])\n\n rownames_mapper: dict[str, str]\n a dictionary with new row names as keys and original rownames as values\n unique_rownames: list[str]\n a list of rownames with duplicate names replaced by dummy names\n colnames_mapper: dict[str, str]\n a dictionary with new column names as keys and original column names as values\n unique_colnames: list[str]\n a list of column names with duplicate names replaced by dummy names\n\n """\n\n def get_duplicates(names):\n seen: 
set = set()\n return {name for name in names if name not in seen}\n\n shared_names = set(rownames).intersection(set(colnames))\n dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names\n\n rownames_mapper = {\n f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names\n }\n unique_rownames = [\n f"row_{i}" if name in dup_names else name for i, name in enumerate(rownames)\n ]\n\n colnames_mapper = {\n f"col_{i}": name for i, name in enumerate(colnames) if name in dup_names\n }\n unique_colnames = [\n f"col_{i}" if name in dup_names else name for i, name in enumerate(colnames)\n ]\n\n return rownames_mapper, unique_rownames, colnames_mapper, unique_colnames\n | .venv\Lib\site-packages\pandas\core\reshape\pivot.py | pivot.py | Python | 28,917 | 0.95 | 0.155729 | 0.068152 | awesome-app | 526 | 2024-12-24T03:37:29.595643 | GPL-3.0 | false | 2de30ad9167d0d6a7318a0cb980d9f05 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.