diff --git a/.gitattributes b/.gitattributes index b1070399be7a30013f6cfda92820cb6ae44e4f40..06747cb67b398b87ded2668158717e8a642b3ff1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -973,3 +973,4 @@ videollama2/lib/python3.10/site-packages/fontTools/feaLib/lexer.cpython-310-x86_ videollama2/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text videollama2/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Bold.ttf filter=lfs diff=lfs merge=lfs -text +parrot/lib/ossl-modules/legacy.so filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/ossl-modules/legacy.so b/parrot/lib/ossl-modules/legacy.so new file mode 100644 index 0000000000000000000000000000000000000000..882bcc96544cf968ac082bdabe91f00d414c9eef --- /dev/null +++ b/parrot/lib/ossl-modules/legacy.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8b8163a4a4cea7693a8d64c064788cc830bfb72e5aae56e9432fee32f5f45a0 +size 158976 diff --git a/parrot/share/terminfo/g/glasstty b/parrot/share/terminfo/g/glasstty new file mode 100644 index 0000000000000000000000000000000000000000..53f72cfd56b966056cb7b8e85bb47dc8c951fd38 Binary files /dev/null and b/parrot/share/terminfo/g/glasstty differ diff --git a/parrot/share/terminfo/g/gnome-rh72 b/parrot/share/terminfo/g/gnome-rh72 new file mode 100644 index 0000000000000000000000000000000000000000..ecf9a0aff2f10f36771426138050eb58bf500116 Binary files /dev/null and b/parrot/share/terminfo/g/gnome-rh72 differ diff --git a/parrot/share/terminfo/g/go225 b/parrot/share/terminfo/g/go225 new file mode 100644 index 0000000000000000000000000000000000000000..d02f256f628d29c83fdb633957231fafb0d7289a Binary files /dev/null and b/parrot/share/terminfo/g/go225 differ diff --git a/parrot/share/terminfo/g/graphos b/parrot/share/terminfo/g/graphos new file mode 100644 index 0000000000000000000000000000000000000000..7a627b3360e82156489541db6917107b0427c7a2 Binary files /dev/null and b/parrot/share/terminfo/g/graphos differ diff --git a/parrot/share/terminfo/g/graphos-30 b/parrot/share/terminfo/g/graphos-30 new file mode 100644 index 0000000000000000000000000000000000000000..c789400c88690eb79d71587daacf6a3a4b1e91ed Binary files /dev/null and b/parrot/share/terminfo/g/graphos-30 differ diff --git a/parrot/share/terminfo/g/gs5430-22 b/parrot/share/terminfo/g/gs5430-22 new file mode 100644 index 0000000000000000000000000000000000000000..bbf0d9a1a0969f9627684f6a45de7e8880107c7e Binary files /dev/null and b/parrot/share/terminfo/g/gs5430-22 differ diff --git a/parrot/share/terminfo/g/gt40 b/parrot/share/terminfo/g/gt40 new file mode 100644 index 0000000000000000000000000000000000000000..ce7f7026d7efd06b394565f90ce0b3cf9b9e3559 Binary files /dev/null and b/parrot/share/terminfo/g/gt40 differ diff --git a/parrot/share/terminfo/g/guru+unk b/parrot/share/terminfo/g/guru+unk new file mode 100644 index 0000000000000000000000000000000000000000..2f6dd54377aa297d466f880928b906055f4a8938 Binary files /dev/null and b/parrot/share/terminfo/g/guru+unk differ diff --git a/parrot/share/terminfo/g/guru-76-w-s b/parrot/share/terminfo/g/guru-76-w-s new file mode 100644 index 0000000000000000000000000000000000000000..bcf43ae8166afdb382f8fdfec7310e443797cf1c Binary files /dev/null and b/parrot/share/terminfo/g/guru-76-w-s differ diff --git a/parrot/share/terminfo/w/wsvt25 
b/parrot/share/terminfo/w/wsvt25 new file mode 100644 index 0000000000000000000000000000000000000000..d4035495fa2ea519a10e584f20cb64198f7579ac Binary files /dev/null and b/parrot/share/terminfo/w/wsvt25 differ diff --git a/parrot/share/terminfo/w/wy-99fgta b/parrot/share/terminfo/w/wy-99fgta new file mode 100644 index 0000000000000000000000000000000000000000..6cf0920e3d69f6d38ee368a301f199ea88f865a9 Binary files /dev/null and b/parrot/share/terminfo/w/wy-99fgta differ diff --git a/parrot/share/terminfo/w/wy100q b/parrot/share/terminfo/w/wy100q new file mode 100644 index 0000000000000000000000000000000000000000..846a60fdeb6ce0c1ccbfe95cd3c3fd7500faac9f Binary files /dev/null and b/parrot/share/terminfo/w/wy100q differ diff --git a/parrot/share/terminfo/w/wy160-43 b/parrot/share/terminfo/w/wy160-43 new file mode 100644 index 0000000000000000000000000000000000000000..fda295a5d70c18d2e3146b6542500cd89aa6402a Binary files /dev/null and b/parrot/share/terminfo/w/wy160-43 differ diff --git a/parrot/share/terminfo/w/wy160-43-w b/parrot/share/terminfo/w/wy160-43-w new file mode 100644 index 0000000000000000000000000000000000000000..2e9bc4e043bfe3d280bbe86ef75f14c99ac450bf Binary files /dev/null and b/parrot/share/terminfo/w/wy160-43-w differ diff --git a/parrot/share/terminfo/w/wy325-25w b/parrot/share/terminfo/w/wy325-25w new file mode 100644 index 0000000000000000000000000000000000000000..03b3fa92442de69f51f62ca599428f45e1dd7257 Binary files /dev/null and b/parrot/share/terminfo/w/wy325-25w differ diff --git a/parrot/share/terminfo/w/wy325-42 b/parrot/share/terminfo/w/wy325-42 new file mode 100644 index 0000000000000000000000000000000000000000..7e043b6a76a2e3a329dd3186d1dfa89bb47c7c84 Binary files /dev/null and b/parrot/share/terminfo/w/wy325-42 differ diff --git a/parrot/share/terminfo/w/wy325-wvb b/parrot/share/terminfo/w/wy325-wvb new file mode 100644 index 0000000000000000000000000000000000000000..77a7a602f0bd019de0dd947aea713636b54b4fb7 Binary files /dev/null and b/parrot/share/terminfo/w/wy325-wvb differ diff --git a/parrot/share/terminfo/w/wy520-48 b/parrot/share/terminfo/w/wy520-48 new file mode 100644 index 0000000000000000000000000000000000000000..dc5c947cb92c6d8669db4a1d7ea9a8e6305cc0cb Binary files /dev/null and b/parrot/share/terminfo/w/wy520-48 differ diff --git a/parrot/share/terminfo/w/wy520-48wpc b/parrot/share/terminfo/w/wy520-48wpc new file mode 100644 index 0000000000000000000000000000000000000000..74c787b8c4eeb814c25e857fb24f32e6831db13a Binary files /dev/null and b/parrot/share/terminfo/w/wy520-48wpc differ diff --git a/parrot/share/terminfo/w/wy520-wvb b/parrot/share/terminfo/w/wy520-wvb new file mode 100644 index 0000000000000000000000000000000000000000..cbb555c3dabf8b4bd2f734de8f1b4dbe223d6d0e Binary files /dev/null and b/parrot/share/terminfo/w/wy520-wvb differ diff --git a/parrot/share/terminfo/w/wy60-25-w b/parrot/share/terminfo/w/wy60-25-w new file mode 100644 index 0000000000000000000000000000000000000000..f5f8461ca3da7833f4540ea67b269aeb640e6d87 Binary files /dev/null and b/parrot/share/terminfo/w/wy60-25-w differ diff --git a/parrot/share/terminfo/w/wy60-PC b/parrot/share/terminfo/w/wy60-PC new file mode 100644 index 0000000000000000000000000000000000000000..492a68af7273230060f0eabac5cdfbb07de7a3f6 Binary files /dev/null and b/parrot/share/terminfo/w/wy60-PC differ diff --git a/parrot/share/terminfo/w/wy99a-ansi b/parrot/share/terminfo/w/wy99a-ansi new file mode 100644 index 0000000000000000000000000000000000000000..dc753fcfcf0ba6db753cefeecd24c8052b55228f Binary 
files /dev/null and b/parrot/share/terminfo/w/wy99a-ansi differ diff --git a/parrot/share/terminfo/w/wy99gt-wvb b/parrot/share/terminfo/w/wy99gt-wvb new file mode 100644 index 0000000000000000000000000000000000000000..17c79a35c1d5dd608fddc86ff05155464e0d670c Binary files /dev/null and b/parrot/share/terminfo/w/wy99gt-wvb differ diff --git a/parrot/share/terminfo/w/wyse-75ap b/parrot/share/terminfo/w/wyse-75ap new file mode 100644 index 0000000000000000000000000000000000000000..9c893d7c5bc37bfbeb0ebcdcc4c9bfe5115ac907 Binary files /dev/null and b/parrot/share/terminfo/w/wyse-75ap differ diff --git a/parrot/share/terminfo/w/wyse150-25-w b/parrot/share/terminfo/w/wyse150-25-w new file mode 100644 index 0000000000000000000000000000000000000000..c6a2ca4b4ac05d812e360251f6cf3e192681c0ab Binary files /dev/null and b/parrot/share/terminfo/w/wyse150-25-w differ diff --git a/parrot/share/terminfo/w/wyse150-vb b/parrot/share/terminfo/w/wyse150-vb new file mode 100644 index 0000000000000000000000000000000000000000..a1f3f0fabef2d6ea6043cb08ed6e5578fe822cab Binary files /dev/null and b/parrot/share/terminfo/w/wyse150-vb differ diff --git a/parrot/share/terminfo/w/wyse150-w b/parrot/share/terminfo/w/wyse150-w new file mode 100644 index 0000000000000000000000000000000000000000..386bc5594a2dd5dfb5692b74fd61dc9d66ff0201 Binary files /dev/null and b/parrot/share/terminfo/w/wyse150-w differ diff --git a/parrot/share/terminfo/w/wyse160-43 b/parrot/share/terminfo/w/wyse160-43 new file mode 100644 index 0000000000000000000000000000000000000000..fda295a5d70c18d2e3146b6542500cd89aa6402a Binary files /dev/null and b/parrot/share/terminfo/w/wyse160-43 differ diff --git a/parrot/share/terminfo/w/wyse185-24 b/parrot/share/terminfo/w/wyse185-24 new file mode 100644 index 0000000000000000000000000000000000000000..fb879978654dabd0501679b6dfde3cd245fca600 Binary files /dev/null and b/parrot/share/terminfo/w/wyse185-24 differ diff --git a/parrot/share/terminfo/w/wyse325-w b/parrot/share/terminfo/w/wyse325-w new file mode 100644 index 0000000000000000000000000000000000000000..0092078d4f8f00cd566df4537fc463a379ccbfdf Binary files /dev/null and b/parrot/share/terminfo/w/wyse325-w differ diff --git a/parrot/share/terminfo/w/wyse50-vb b/parrot/share/terminfo/w/wyse50-vb new file mode 100644 index 0000000000000000000000000000000000000000..ae60620e1ab9ffcc6dc595b4c2eba9fa2b4cb6a0 Binary files /dev/null and b/parrot/share/terminfo/w/wyse50-vb differ diff --git a/parrot/share/terminfo/w/wyse50-wvb b/parrot/share/terminfo/w/wyse50-wvb new file mode 100644 index 0000000000000000000000000000000000000000..eb00142c54e22dba8100f38c8e0d3774b07d9200 Binary files /dev/null and b/parrot/share/terminfo/w/wyse50-wvb differ diff --git a/parrot/share/terminfo/w/wyse520-vb b/parrot/share/terminfo/w/wyse520-vb new file mode 100644 index 0000000000000000000000000000000000000000..a537f028a0f9d0f56bef2300e431bb404469a747 Binary files /dev/null and b/parrot/share/terminfo/w/wyse520-vb differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/apply.py b/videollama2/lib/python3.10/site-packages/pandas/core/apply.py new file mode 100644 index 0000000000000000000000000000000000000000..25a71ce5b5f4fe15bc746da43681321bcb9594b8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/apply.py @@ -0,0 +1,2062 @@ +from __future__ import annotations + +import abc +from collections import defaultdict +import functools +from functools import partial +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, 
+ cast, +) +import warnings + +import numpy as np + +from pandas._config import option_context + +from pandas._libs import lib +from pandas._libs.internals import BlockValuesRefs +from pandas._typing import ( + AggFuncType, + AggFuncTypeBase, + AggFuncTypeDict, + AggObjType, + Axis, + AxisInt, + NDFrameT, + npt, +) +from pandas.compat._optional import import_optional_dependency +from pandas.errors import SpecificationError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import is_nested_object +from pandas.core.dtypes.common import ( + is_dict_like, + is_extension_array_dtype, + is_list_like, + is_numeric_dtype, + is_sequence, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCNDFrame, + ABCSeries, +) + +from pandas.core._numba.executor import generate_apply_looper +import pandas.core.common as com +from pandas.core.construction import ensure_wrapped_if_datetimelike + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Hashable, + Iterable, + MutableMapping, + Sequence, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + from pandas.core.groupby import GroupBy + from pandas.core.resample import Resampler + from pandas.core.window.rolling import BaseWindow + + +ResType = dict[int, Any] + + +def frame_apply( + obj: DataFrame, + func: AggFuncType, + axis: Axis = 0, + raw: bool = False, + result_type: str | None = None, + by_row: Literal[False, "compat"] = "compat", + engine: str = "python", + engine_kwargs: dict[str, bool] | None = None, + args=None, + kwargs=None, +) -> FrameApply: + """construct and return a row or column based frame apply object""" + axis = obj._get_axis_number(axis) + klass: type[FrameApply] + if axis == 0: + klass = FrameRowApply + elif axis == 1: + klass = FrameColumnApply + + _, func, _, _ = reconstruct_func(func, **kwargs) + assert func is not None + + return klass( + obj, + func, + raw=raw, + result_type=result_type, + by_row=by_row, + engine=engine, + engine_kwargs=engine_kwargs, + args=args, + kwargs=kwargs, + ) + + +class Apply(metaclass=abc.ABCMeta): + axis: AxisInt + + def __init__( + self, + obj: AggObjType, + func: AggFuncType, + raw: bool, + result_type: str | None, + *, + by_row: Literal[False, "compat", "_compat"] = "compat", + engine: str = "python", + engine_kwargs: dict[str, bool] | None = None, + args, + kwargs, + ) -> None: + self.obj = obj + self.raw = raw + + assert by_row is False or by_row in ["compat", "_compat"] + self.by_row = by_row + + self.args = args or () + self.kwargs = kwargs or {} + + self.engine = engine + self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs + + if result_type not in [None, "reduce", "broadcast", "expand"]: + raise ValueError( + "invalid value for result_type, must be one " + "of {None, 'reduce', 'broadcast', 'expand'}" + ) + + self.result_type = result_type + + self.func = func + + @abc.abstractmethod + def apply(self) -> DataFrame | Series: + pass + + @abc.abstractmethod + def agg_or_apply_list_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + pass + + @abc.abstractmethod + def agg_or_apply_dict_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + pass + + def agg(self) -> DataFrame | Series | None: + """ + Provide an implementation for the aggregators. 
+ + Returns + ------- + Result of aggregation, or None if agg cannot be performed by + this method. + """ + obj = self.obj + func = self.func + args = self.args + kwargs = self.kwargs + + if isinstance(func, str): + return self.apply_str() + + if is_dict_like(func): + return self.agg_dict_like() + elif is_list_like(func): + # we require a list, but not a 'str' + return self.agg_list_like() + + if callable(func): + f = com.get_cython_func(func) + if f and not args and not kwargs: + warn_alias_replacement(obj, func, f) + return getattr(obj, f)() + + # caller can react + return None + + def transform(self) -> DataFrame | Series: + """ + Transform a DataFrame or Series. + + Returns + ------- + DataFrame or Series + Result of applying ``func`` along the given axis of the + Series or DataFrame. + + Raises + ------ + ValueError + If the transform function fails or does not transform. + """ + obj = self.obj + func = self.func + axis = self.axis + args = self.args + kwargs = self.kwargs + + is_series = obj.ndim == 1 + + if obj._get_axis_number(axis) == 1: + assert not is_series + return obj.T.transform(func, 0, *args, **kwargs).T + + if is_list_like(func) and not is_dict_like(func): + func = cast(list[AggFuncTypeBase], func) + # Convert func equivalent dict + if is_series: + func = {com.get_callable_name(v) or v: v for v in func} + else: + func = {col: func for col in obj} + + if is_dict_like(func): + func = cast(AggFuncTypeDict, func) + return self.transform_dict_like(func) + + # func is either str or callable + func = cast(AggFuncTypeBase, func) + try: + result = self.transform_str_or_callable(func) + except TypeError: + raise + except Exception as err: + raise ValueError("Transform function failed") from err + + # Functions that transform may return empty Series/DataFrame + # when the dtype is not appropriate + if ( + isinstance(result, (ABCSeries, ABCDataFrame)) + and result.empty + and not obj.empty + ): + raise ValueError("Transform function failed") + # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type + # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy, + # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame, + # Series]" + if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals( + obj.index # type: ignore[arg-type] + ): + raise ValueError("Function did not transform") + + return result + + def transform_dict_like(self, func) -> DataFrame: + """ + Compute transform in the case of a dict-like func + """ + from pandas.core.reshape.concat import concat + + obj = self.obj + args = self.args + kwargs = self.kwargs + + # transform is currently only for Series/DataFrame + assert isinstance(obj, ABCNDFrame) + + if len(func) == 0: + raise ValueError("No transform functions were provided") + + func = self.normalize_dictlike_arg("transform", obj, func) + + results: dict[Hashable, DataFrame | Series] = {} + for name, how in func.items(): + colg = obj._gotitem(name, ndim=1) + results[name] = colg.transform(how, 0, *args, **kwargs) + return concat(results, axis=1) + + def transform_str_or_callable(self, func) -> DataFrame | Series: + """ + Compute transform in the case of a string or callable func + """ + obj = self.obj + args = self.args + kwargs = self.kwargs + + if isinstance(func, str): + return self._apply_str(obj, func, *args, **kwargs) + + if not args and not kwargs: + f = com.get_cython_func(func) + if f: + warn_alias_replacement(obj, func, f) + return getattr(obj, f)() + + # Two possible ways to use a UDF - apply or call 
directly + try: + return obj.apply(func, args=args, **kwargs) + except Exception: + return func(obj, *args, **kwargs) + + def agg_list_like(self) -> DataFrame | Series: + """ + Compute aggregation in the case of a list-like argument. + + Returns + ------- + Result of aggregation. + """ + return self.agg_or_apply_list_like(op_name="agg") + + def compute_list_like( + self, + op_name: Literal["agg", "apply"], + selected_obj: Series | DataFrame, + kwargs: dict[str, Any], + ) -> tuple[list[Hashable] | Index, list[Any]]: + """ + Compute agg/apply results for like-like input. + + Parameters + ---------- + op_name : {"agg", "apply"} + Operation being performed. + selected_obj : Series or DataFrame + Data to perform operation on. + kwargs : dict + Keyword arguments to pass to the functions. + + Returns + ------- + keys : list[Hashable] or Index + Index labels for result. + results : list + Data for result. When aggregating with a Series, this can contain any + Python objects. + """ + func = cast(list[AggFuncTypeBase], self.func) + obj = self.obj + + results = [] + keys = [] + + # degenerate case + if selected_obj.ndim == 1: + for a in func: + colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj) + args = ( + [self.axis, *self.args] + if include_axis(op_name, colg) + else self.args + ) + new_res = getattr(colg, op_name)(a, *args, **kwargs) + results.append(new_res) + + # make sure we find a good name + name = com.get_callable_name(a) or a + keys.append(name) + + else: + indices = [] + for index, col in enumerate(selected_obj): + colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index]) + args = ( + [self.axis, *self.args] + if include_axis(op_name, colg) + else self.args + ) + new_res = getattr(colg, op_name)(func, *args, **kwargs) + results.append(new_res) + indices.append(index) + # error: Incompatible types in assignment (expression has type "Any | + # Index", variable has type "list[Any | Callable[..., Any] | str]") + keys = selected_obj.columns.take(indices) # type: ignore[assignment] + + return keys, results + + def wrap_results_list_like( + self, keys: Iterable[Hashable], results: list[Series | DataFrame] + ): + from pandas.core.reshape.concat import concat + + obj = self.obj + + try: + return concat(results, keys=keys, axis=1, sort=False) + except TypeError as err: + # we are concatting non-NDFrame objects, + # e.g. a list of scalars + from pandas import Series + + result = Series(results, index=keys, name=obj.name) + if is_nested_object(result): + raise ValueError( + "cannot combine transform and aggregation operations" + ) from err + return result + + def agg_dict_like(self) -> DataFrame | Series: + """ + Compute aggregation in the case of a dict-like argument. + + Returns + ------- + Result of aggregation. + """ + return self.agg_or_apply_dict_like(op_name="agg") + + def compute_dict_like( + self, + op_name: Literal["agg", "apply"], + selected_obj: Series | DataFrame, + selection: Hashable | Sequence[Hashable], + kwargs: dict[str, Any], + ) -> tuple[list[Hashable], list[Any]]: + """ + Compute agg/apply results for dict-like input. + + Parameters + ---------- + op_name : {"agg", "apply"} + Operation being performed. + selected_obj : Series or DataFrame + Data to perform operation on. + selection : hashable or sequence of hashables + Used by GroupBy, Window, and Resample if selection is applied to the object. + kwargs : dict + Keyword arguments to pass to the functions. + + Returns + ------- + keys : list[hashable] + Index labels for result. 
+ results : list + Data for result. When aggregating with a Series, this can contain any + Python object. + """ + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + SeriesGroupBy, + ) + + obj = self.obj + is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) + func = cast(AggFuncTypeDict, self.func) + func = self.normalize_dictlike_arg(op_name, selected_obj, func) + + is_non_unique_col = ( + selected_obj.ndim == 2 + and selected_obj.columns.nunique() < len(selected_obj.columns) + ) + + if selected_obj.ndim == 1: + # key only used for output + colg = obj._gotitem(selection, ndim=1) + results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()] + keys = list(func.keys()) + elif not is_groupby and is_non_unique_col: + # key used for column selection and output + # GH#51099 + results = [] + keys = [] + for key, how in func.items(): + indices = selected_obj.columns.get_indexer_for([key]) + labels = selected_obj.columns.take(indices) + label_to_indices = defaultdict(list) + for index, label in zip(indices, labels): + label_to_indices[label].append(index) + + key_data = [ + getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs) + for label, indices in label_to_indices.items() + for indice in indices + ] + + keys += [key] * len(key_data) + results += key_data + else: + # key used for column selection and output + results = [ + getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs) + for key, how in func.items() + ] + keys = list(func.keys()) + + return keys, results + + def wrap_results_dict_like( + self, + selected_obj: Series | DataFrame, + result_index: list[Hashable], + result_data: list, + ): + from pandas import Index + from pandas.core.reshape.concat import concat + + obj = self.obj + + # Avoid making two isinstance calls in all and any below + is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data] + + if all(is_ndframe): + results = dict(zip(result_index, result_data)) + keys_to_use: Iterable[Hashable] + keys_to_use = [k for k in result_index if not results[k].empty] + # Have to check, if at least one DataFrame is not empty. + keys_to_use = keys_to_use if keys_to_use != [] else result_index + if selected_obj.ndim == 2: + # keys are columns, so we can preserve names + ktu = Index(keys_to_use) + ktu._set_names(selected_obj.columns.names) + keys_to_use = ktu + + axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1 + result = concat( + {k: results[k] for k in keys_to_use}, + axis=axis, + keys=keys_to_use, + ) + elif any(is_ndframe): + # There is a mix of NDFrames and scalars + raise ValueError( + "cannot perform both aggregation " + "and transformation operations " + "simultaneously" + ) + else: + from pandas import Series + + # we have a list of scalars + # GH 36212 use name only if obj is a series + if obj.ndim == 1: + obj = cast("Series", obj) + name = obj.name + else: + name = None + + result = Series(result_data, index=result_index, name=name) + + return result + + def apply_str(self) -> DataFrame | Series: + """ + Compute apply in case of a string. + + Returns + ------- + result: Series or DataFrame + """ + # Caller is responsible for checking isinstance(self.f, str) + func = cast(str, self.func) + + obj = self.obj + + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + SeriesGroupBy, + ) + + # Support for `frame.transform('method')` + # Some methods (shift, etc.) require the axis argument, others + # don't, so inspect and insert if necessary. 
+ method = getattr(obj, func, None) + if callable(method): + sig = inspect.getfullargspec(method) + arg_names = (*sig.args, *sig.kwonlyargs) + if self.axis != 0 and ( + "axis" not in arg_names or func in ("corrwith", "skew") + ): + raise ValueError(f"Operation {func} does not support axis=1") + if "axis" in arg_names: + if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)): + # Try to avoid FutureWarning for deprecated axis keyword; + # If self.axis matches the axis we would get by not passing + # axis, we safely exclude the keyword. + + default_axis = 0 + if func in ["idxmax", "idxmin"]: + # DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis, + # whereas other axis keywords default to 0 + default_axis = self.obj.axis + + if default_axis != self.axis: + self.kwargs["axis"] = self.axis + else: + self.kwargs["axis"] = self.axis + return self._apply_str(obj, func, *self.args, **self.kwargs) + + def apply_list_or_dict_like(self) -> DataFrame | Series: + """ + Compute apply in case of a list-like or dict-like. + + Returns + ------- + result: Series, DataFrame, or None + Result when self.func is a list-like or dict-like, None otherwise. + """ + + if self.engine == "numba": + raise NotImplementedError( + "The 'numba' engine doesn't support list-like/" + "dict likes of callables yet." + ) + + if self.axis == 1 and isinstance(self.obj, ABCDataFrame): + return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T + + func = self.func + kwargs = self.kwargs + + if is_dict_like(func): + result = self.agg_or_apply_dict_like(op_name="apply") + else: + result = self.agg_or_apply_list_like(op_name="apply") + + result = reconstruct_and_relabel_result(result, func, **kwargs) + + return result + + def normalize_dictlike_arg( + self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict + ) -> AggFuncTypeDict: + """ + Handler for dict-like argument. + + Ensures that necessary columns exist if obj is a DataFrame, and + that a nested renamer is not passed. Also normalizes to all lists + when values consists of a mix of list and non-lists. + """ + assert how in ("apply", "agg", "transform") + + # Can't use func.values(); wouldn't work for a Series + if ( + how == "agg" + and isinstance(obj, ABCSeries) + and any(is_list_like(v) for _, v in func.items()) + ) or (any(is_dict_like(v) for _, v in func.items())): + # GH 15931 - deprecation of renaming keys + raise SpecificationError("nested renamer is not supported") + + if obj.ndim != 1: + # Check for missing columns on a frame + from pandas import Index + + cols = Index(list(func.keys())).difference(obj.columns, sort=True) + if len(cols) > 0: + raise KeyError(f"Column(s) {list(cols)} do not exist") + + aggregator_types = (list, tuple, dict) + + # if we have a dict of any non-scalars + # eg. 
{'A' : ['mean']}, normalize all to + # be list-likes + # Cannot use func.values() because arg may be a Series + if any(isinstance(x, aggregator_types) for _, x in func.items()): + new_func: AggFuncTypeDict = {} + for k, v in func.items(): + if not isinstance(v, aggregator_types): + new_func[k] = [v] + else: + new_func[k] = v + func = new_func + return func + + def _apply_str(self, obj, func: str, *args, **kwargs): + """ + if arg is a string, then try to operate on it: + - try to find a function (or attribute) on obj + - try to find a numpy function + - raise + """ + assert isinstance(func, str) + + if hasattr(obj, func): + f = getattr(obj, func) + if callable(f): + return f(*args, **kwargs) + + # people may aggregate on a non-callable attribute + # but don't let them think they can pass args to it + assert len(args) == 0 + assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0 + return f + elif hasattr(np, func) and hasattr(obj, "__array__"): + # in particular exclude Window + f = getattr(np, func) + return f(obj, *args, **kwargs) + else: + msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object" + raise AttributeError(msg) + + +class NDFrameApply(Apply): + """ + Methods shared by FrameApply and SeriesApply but + not GroupByApply or ResamplerWindowApply + """ + + obj: DataFrame | Series + + @property + def index(self) -> Index: + return self.obj.index + + @property + def agg_axis(self) -> Index: + return self.obj._get_agg_axis(self.axis) + + def agg_or_apply_list_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + obj = self.obj + kwargs = self.kwargs + + if op_name == "apply": + if isinstance(self, FrameApply): + by_row = self.by_row + + elif isinstance(self, SeriesApply): + by_row = "_compat" if self.by_row else False + else: + by_row = False + kwargs = {**kwargs, "by_row": by_row} + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + keys, results = self.compute_list_like(op_name, obj, kwargs) + result = self.wrap_results_list_like(keys, results) + return result + + def agg_or_apply_dict_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + assert op_name in ["agg", "apply"] + obj = self.obj + + kwargs = {} + if op_name == "apply": + by_row = "_compat" if self.by_row else False + kwargs.update({"by_row": by_row}) + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + selection = None + result_index, result_data = self.compute_dict_like( + op_name, obj, selection, kwargs + ) + result = self.wrap_results_dict_like(obj, result_index, result_data) + return result + + +class FrameApply(NDFrameApply): + obj: DataFrame + + def __init__( + self, + obj: AggObjType, + func: AggFuncType, + raw: bool, + result_type: str | None, + *, + by_row: Literal[False, "compat"] = False, + engine: str = "python", + engine_kwargs: dict[str, bool] | None = None, + args, + kwargs, + ) -> None: + if by_row is not False and by_row != "compat": + raise ValueError(f"by_row={by_row} not allowed") + super().__init__( + obj, + func, + raw, + result_type, + by_row=by_row, + engine=engine, + engine_kwargs=engine_kwargs, + args=args, + kwargs=kwargs, + ) + + # --------------------------------------------------------------- + # Abstract Methods + + @property + @abc.abstractmethod + def result_index(self) -> Index: + pass + + @property + @abc.abstractmethod + def result_columns(self) -> Index: + pass + + @property + @abc.abstractmethod + def 
series_generator(self) -> Generator[Series, None, None]: + pass + + @staticmethod + @functools.cache + @abc.abstractmethod + def generate_numba_apply_func( + func, nogil=True, nopython=True, parallel=False + ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]: + pass + + @abc.abstractmethod + def apply_with_numba(self): + pass + + def validate_values_for_numba(self): + # Validate column dtyps all OK + for colname, dtype in self.obj.dtypes.items(): + if not is_numeric_dtype(dtype): + raise ValueError( + f"Column {colname} must have a numeric dtype. " + f"Found '{dtype}' instead" + ) + if is_extension_array_dtype(dtype): + raise ValueError( + f"Column {colname} is backed by an extension array, " + f"which is not supported by the numba engine." + ) + + @abc.abstractmethod + def wrap_results_for_axis( + self, results: ResType, res_index: Index + ) -> DataFrame | Series: + pass + + # --------------------------------------------------------------- + + @property + def res_columns(self) -> Index: + return self.result_columns + + @property + def columns(self) -> Index: + return self.obj.columns + + @cache_readonly + def values(self): + return self.obj.values + + def apply(self) -> DataFrame | Series: + """compute the results""" + + # dispatch to handle list-like or dict-like + if is_list_like(self.func): + if self.engine == "numba": + raise NotImplementedError( + "the 'numba' engine doesn't support lists of callables yet" + ) + return self.apply_list_or_dict_like() + + # all empty + if len(self.columns) == 0 and len(self.index) == 0: + return self.apply_empty_result() + + # string dispatch + if isinstance(self.func, str): + if self.engine == "numba": + raise NotImplementedError( + "the 'numba' engine doesn't support using " + "a string as the callable function" + ) + return self.apply_str() + + # ufunc + elif isinstance(self.func, np.ufunc): + if self.engine == "numba": + raise NotImplementedError( + "the 'numba' engine doesn't support " + "using a numpy ufunc as the callable function" + ) + with np.errstate(all="ignore"): + results = self.obj._mgr.apply("apply", func=self.func) + # _constructor will retain self.index and self.columns + return self.obj._constructor_from_mgr(results, axes=results.axes) + + # broadcasting + if self.result_type == "broadcast": + if self.engine == "numba": + raise NotImplementedError( + "the 'numba' engine doesn't support result_type='broadcast'" + ) + return self.apply_broadcast(self.obj) + + # one axis empty + elif not all(self.obj.shape): + return self.apply_empty_result() + + # raw + elif self.raw: + return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs) + + return self.apply_standard() + + def agg(self): + obj = self.obj + axis = self.axis + + # TODO: Avoid having to change state + self.obj = self.obj if self.axis == 0 else self.obj.T + self.axis = 0 + + result = None + try: + result = super().agg() + finally: + self.obj = obj + self.axis = axis + + if axis == 1: + result = result.T if result is not None else result + + if result is None: + result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs) + + return result + + def apply_empty_result(self): + """ + we have an empty result; at least 1 axis is 0 + + we will try to apply the function to an empty + series in order to see if this is a reduction function + """ + assert callable(self.func) + + # we are not asked to reduce or infer reduction + # so just return a copy of the existing object + if self.result_type not in ["reduce", None]: + return self.obj.copy() + + # we may 
need to infer + should_reduce = self.result_type == "reduce" + + from pandas import Series + + if not should_reduce: + try: + if self.axis == 0: + r = self.func( + Series([], dtype=np.float64), *self.args, **self.kwargs + ) + else: + r = self.func( + Series(index=self.columns, dtype=np.float64), + *self.args, + **self.kwargs, + ) + except Exception: + pass + else: + should_reduce = not isinstance(r, Series) + + if should_reduce: + if len(self.agg_axis): + r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs) + else: + r = np.nan + + return self.obj._constructor_sliced(r, index=self.agg_axis) + else: + return self.obj.copy() + + def apply_raw(self, engine="python", engine_kwargs=None): + """apply to the values as a numpy array""" + + def wrap_function(func): + """ + Wrap user supplied function to work around numpy issue. + + see https://github.com/numpy/numpy/issues/8352 + """ + + def wrapper(*args, **kwargs): + result = func(*args, **kwargs) + if isinstance(result, str): + result = np.array(result, dtype=object) + return result + + return wrapper + + if engine == "numba": + engine_kwargs = {} if engine_kwargs is None else engine_kwargs + + # error: Argument 1 to "__call__" of "_lru_cache_wrapper" has + # incompatible type "Callable[..., Any] | str | list[Callable + # [..., Any] | str] | dict[Hashable,Callable[..., Any] | str | + # list[Callable[..., Any] | str]]"; expected "Hashable" + nb_looper = generate_apply_looper( + self.func, **engine_kwargs # type: ignore[arg-type] + ) + result = nb_looper(self.values, self.axis) + # If we made the result 2-D, squeeze it back to 1-D + result = np.squeeze(result) + else: + result = np.apply_along_axis( + wrap_function(self.func), + self.axis, + self.values, + *self.args, + **self.kwargs, + ) + + # TODO: mixed type case + if result.ndim == 2: + return self.obj._constructor(result, index=self.index, columns=self.columns) + else: + return self.obj._constructor_sliced(result, index=self.agg_axis) + + def apply_broadcast(self, target: DataFrame) -> DataFrame: + assert callable(self.func) + + result_values = np.empty_like(target.values) + + # axis which we want to compare compliance + result_compare = target.shape[0] + + for i, col in enumerate(target.columns): + res = self.func(target[col], *self.args, **self.kwargs) + ares = np.asarray(res).ndim + + # must be a scalar or 1d + if ares > 1: + raise ValueError("too many dims to broadcast") + if ares == 1: + # must match return dim + if result_compare != len(res): + raise ValueError("cannot broadcast result") + + result_values[:, i] = res + + # we *always* preserve the original index / columns + result = self.obj._constructor( + result_values, index=target.index, columns=target.columns + ) + return result + + def apply_standard(self): + if self.engine == "python": + results, res_index = self.apply_series_generator() + else: + results, res_index = self.apply_series_numba() + + # wrap results + return self.wrap_results(results, res_index) + + def apply_series_generator(self) -> tuple[ResType, Index]: + assert callable(self.func) + + series_gen = self.series_generator + res_index = self.result_index + + results = {} + + with option_context("mode.chained_assignment", None): + for i, v in enumerate(series_gen): + # ignore SettingWithCopy here in case the user mutates + results[i] = self.func(v, *self.args, **self.kwargs) + if isinstance(results[i], ABCSeries): + # If we have a view on v, we need to make a copy because + # series_generator will swap out the underlying data + results[i] = 
results[i].copy(deep=False) + + return results, res_index + + def apply_series_numba(self): + if self.engine_kwargs.get("parallel", False): + raise NotImplementedError( + "Parallel apply is not supported when raw=False and engine='numba'" + ) + if not self.obj.index.is_unique or not self.columns.is_unique: + raise NotImplementedError( + "The index/columns must be unique when raw=False and engine='numba'" + ) + self.validate_values_for_numba() + results = self.apply_with_numba() + return results, self.result_index + + def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series: + from pandas import Series + + # see if we can infer the results + if len(results) > 0 and 0 in results and is_sequence(results[0]): + return self.wrap_results_for_axis(results, res_index) + + # dict of scalars + + # the default dtype of an empty Series is `object`, but this + # code can be hit by df.mean() where the result should have dtype + # float64 even if it's an empty Series. + constructor_sliced = self.obj._constructor_sliced + if len(results) == 0 and constructor_sliced is Series: + result = constructor_sliced(results, dtype=np.float64) + else: + result = constructor_sliced(results) + result.index = res_index + + return result + + def apply_str(self) -> DataFrame | Series: + # Caller is responsible for checking isinstance(self.func, str) + # TODO: GH#39993 - Avoid special-casing by replacing with lambda + if self.func == "size": + # Special-cased because DataFrame.size returns a single scalar + obj = self.obj + value = obj.shape[self.axis] + return obj._constructor_sliced(value, index=self.agg_axis) + return super().apply_str() + + +class FrameRowApply(FrameApply): + axis: AxisInt = 0 + + @property + def series_generator(self) -> Generator[Series, None, None]: + return (self.obj._ixs(i, axis=1) for i in range(len(self.columns))) + + @staticmethod + @functools.cache + def generate_numba_apply_func( + func, nogil=True, nopython=True, parallel=False + ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]: + numba = import_optional_dependency("numba") + from pandas import Series + + # Import helper from extensions to cast string object -> np strings + # Note: This also has the side effect of loading our numba extensions + from pandas.core._numba.extensions import maybe_cast_str + + jitted_udf = numba.extending.register_jitable(func) + + # Currently the parallel argument doesn't get passed through here + # (it's disabled) since the dicts in numba aren't thread-safe. 
+ @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel) + def numba_func(values, col_names, df_index): + results = {} + for j in range(values.shape[1]): + # Create the series + ser = Series( + values[:, j], index=df_index, name=maybe_cast_str(col_names[j]) + ) + results[j] = jitted_udf(ser) + return results + + return numba_func + + def apply_with_numba(self) -> dict[int, Any]: + nb_func = self.generate_numba_apply_func( + cast(Callable, self.func), **self.engine_kwargs + ) + from pandas.core._numba.extensions import set_numba_data + + index = self.obj.index + if index.dtype == "string": + index = index.astype(object) + + columns = self.obj.columns + if columns.dtype == "string": + columns = columns.astype(object) + + # Convert from numba dict to regular dict + # Our isinstance checks in the df constructor don't pass for numbas typed dict + with set_numba_data(index) as index, set_numba_data(columns) as columns: + res = dict(nb_func(self.values, columns, index)) + return res + + @property + def result_index(self) -> Index: + return self.columns + + @property + def result_columns(self) -> Index: + return self.index + + def wrap_results_for_axis( + self, results: ResType, res_index: Index + ) -> DataFrame | Series: + """return the results for the rows""" + + if self.result_type == "reduce": + # e.g. test_apply_dict GH#8735 + res = self.obj._constructor_sliced(results) + res.index = res_index + return res + + elif self.result_type is None and all( + isinstance(x, dict) for x in results.values() + ): + # Our operation was a to_dict op e.g. + # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544 + res = self.obj._constructor_sliced(results) + res.index = res_index + return res + + try: + result = self.obj._constructor(data=results) + except ValueError as err: + if "All arrays must be of the same length" in str(err): + # e.g. result = [[2, 3], [1.5], ['foo', 'bar']] + # see test_agg_listlike_result GH#29587 + res = self.obj._constructor_sliced(results) + res.index = res_index + return res + else: + raise + + if not isinstance(results[0], ABCSeries): + if len(result.index) == len(self.res_columns): + result.index = self.res_columns + + if len(result.columns) == len(res_index): + result.columns = res_index + + return result + + +class FrameColumnApply(FrameApply): + axis: AxisInt = 1 + + def apply_broadcast(self, target: DataFrame) -> DataFrame: + result = super().apply_broadcast(target.T) + return result.T + + @property + def series_generator(self) -> Generator[Series, None, None]: + values = self.values + values = ensure_wrapped_if_datetimelike(values) + assert len(values) > 0 + + # We create one Series object, and will swap out the data inside + # of it. Kids: don't do this at home. 
+ ser = self.obj._ixs(0, axis=0) + mgr = ser._mgr + + is_view = mgr.blocks[0].refs.has_reference() # type: ignore[union-attr] + + if isinstance(ser.dtype, ExtensionDtype): + # values will be incorrect for this block + # TODO(EA2D): special case would be unnecessary with 2D EAs + obj = self.obj + for i in range(len(obj)): + yield obj._ixs(i, axis=0) + + else: + for arr, name in zip(values, self.index): + # GH#35462 re-pin mgr in case setitem changed it + ser._mgr = mgr + mgr.set_values(arr) + object.__setattr__(ser, "_name", name) + if not is_view: + # In apply_series_generator we store the a shallow copy of the + # result, which potentially increases the ref count of this reused + # `ser` object (depending on the result of the applied function) + # -> if that happened and `ser` is already a copy, then we reset + # the refs here to avoid triggering a unnecessary CoW inside the + # applied function (https://github.com/pandas-dev/pandas/pull/56212) + mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0]) # type: ignore[union-attr] + yield ser + + @staticmethod + @functools.cache + def generate_numba_apply_func( + func, nogil=True, nopython=True, parallel=False + ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]: + numba = import_optional_dependency("numba") + from pandas import Series + from pandas.core._numba.extensions import maybe_cast_str + + jitted_udf = numba.extending.register_jitable(func) + + @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel) + def numba_func(values, col_names_index, index): + results = {} + # Currently the parallel argument doesn't get passed through here + # (it's disabled) since the dicts in numba aren't thread-safe. + for i in range(values.shape[0]): + # Create the series + # TODO: values corrupted without the copy + ser = Series( + values[i].copy(), + index=col_names_index, + name=maybe_cast_str(index[i]), + ) + results[i] = jitted_udf(ser) + + return results + + return numba_func + + def apply_with_numba(self) -> dict[int, Any]: + nb_func = self.generate_numba_apply_func( + cast(Callable, self.func), **self.engine_kwargs + ) + + from pandas.core._numba.extensions import set_numba_data + + # Convert from numba dict to regular dict + # Our isinstance checks in the df constructor don't pass for numbas typed dict + with set_numba_data(self.obj.index) as index, set_numba_data( + self.columns + ) as columns: + res = dict(nb_func(self.values, columns, index)) + + return res + + @property + def result_index(self) -> Index: + return self.index + + @property + def result_columns(self) -> Index: + return self.columns + + def wrap_results_for_axis( + self, results: ResType, res_index: Index + ) -> DataFrame | Series: + """return the results for the columns""" + result: DataFrame | Series + + # we have requested to expand + if self.result_type == "expand": + result = self.infer_to_same_shape(results, res_index) + + # we have a non-series and don't want inference + elif not isinstance(results[0], ABCSeries): + result = self.obj._constructor_sliced(results) + result.index = res_index + + # we may want to infer results + else: + result = self.infer_to_same_shape(results, res_index) + + return result + + def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame: + """infer the results to the same shape as the input object""" + result = self.obj._constructor(data=results) + result = result.T + + # set the index + result.index = res_index + + # infer dtypes + result = result.infer_objects(copy=False) + + return result + + +class 
SeriesApply(NDFrameApply): + obj: Series + axis: AxisInt = 0 + by_row: Literal[False, "compat", "_compat"] # only relevant for apply() + + def __init__( + self, + obj: Series, + func: AggFuncType, + *, + convert_dtype: bool | lib.NoDefault = lib.no_default, + by_row: Literal[False, "compat", "_compat"] = "compat", + args, + kwargs, + ) -> None: + if convert_dtype is lib.no_default: + convert_dtype = True + else: + warnings.warn( + "the convert_dtype parameter is deprecated and will be removed in a " + "future version. Do ``ser.astype(object).apply()`` " + "instead if you want ``convert_dtype=False``.", + FutureWarning, + stacklevel=find_stack_level(), + ) + self.convert_dtype = convert_dtype + + super().__init__( + obj, + func, + raw=False, + result_type=None, + by_row=by_row, + args=args, + kwargs=kwargs, + ) + + def apply(self) -> DataFrame | Series: + obj = self.obj + + if len(obj) == 0: + return self.apply_empty_result() + + # dispatch to handle list-like or dict-like + if is_list_like(self.func): + return self.apply_list_or_dict_like() + + if isinstance(self.func, str): + # if we are a string, try to dispatch + return self.apply_str() + + if self.by_row == "_compat": + return self.apply_compat() + + # self.func is Callable + return self.apply_standard() + + def agg(self): + result = super().agg() + if result is None: + obj = self.obj + func = self.func + # string, list-like, and dict-like are entirely handled in super + assert callable(func) + + # GH53325: The setup below is just to keep current behavior while emitting a + # deprecation message. In the future this will all be replaced with a simple + # `result = f(self.obj, *self.args, **self.kwargs)`. + try: + result = obj.apply(func, args=self.args, **self.kwargs) + except (ValueError, AttributeError, TypeError): + result = func(obj, *self.args, **self.kwargs) + else: + msg = ( + f"using {func} in {type(obj).__name__}.agg cannot aggregate and " + f"has been deprecated. Use {type(obj).__name__}.transform to " + f"keep behavior unchanged." + ) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + + return result + + def apply_empty_result(self) -> Series: + obj = self.obj + return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__( + obj, method="apply" + ) + + def apply_compat(self): + """compat apply method for funcs in listlikes and dictlikes. + + Used for each callable when giving listlikes and dictlikes of callables to + apply. Needed for compatibility with Pandas < v2.1. + + .. versionadded:: 2.1.0 + """ + obj = self.obj + func = self.func + + if callable(func): + f = com.get_cython_func(func) + if f and not self.args and not self.kwargs: + return obj.apply(func, by_row=False) + + try: + result = obj.apply(func, by_row="compat") + except (ValueError, AttributeError, TypeError): + result = obj.apply(func, by_row=False) + return result + + def apply_standard(self) -> DataFrame | Series: + # caller is responsible for ensuring that f is Callable + func = cast(Callable, self.func) + obj = self.obj + + if isinstance(func, np.ufunc): + with np.errstate(all="ignore"): + return func(obj, *self.args, **self.kwargs) + elif not self.by_row: + return func(obj, *self.args, **self.kwargs) + + if self.args or self.kwargs: + # _map_values does not support args/kwargs + def curried(x): + return func(x, *self.args, **self.kwargs) + + else: + curried = func + + # row-wise access + # apply doesn't have a `na_action` keyword and for backward compat reasons + # we need to give `na_action="ignore"` for categorical data. 
+ # TODO: remove the `na_action="ignore"` when that default has been changed in + # Categorical (GH51645). + action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None + mapped = obj._map_values( + mapper=curried, na_action=action, convert=self.convert_dtype + ) + + if len(mapped) and isinstance(mapped[0], ABCSeries): + # GH#43986 Need to do list(mapped) in order to get treated as nested + # See also GH#25959 regarding EA support + return obj._constructor_expanddim(list(mapped), index=obj.index) + else: + return obj._constructor(mapped, index=obj.index).__finalize__( + obj, method="apply" + ) + + +class GroupByApply(Apply): + obj: GroupBy | Resampler | BaseWindow + + def __init__( + self, + obj: GroupBy[NDFrameT], + func: AggFuncType, + *, + args, + kwargs, + ) -> None: + kwargs = kwargs.copy() + self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0)) + super().__init__( + obj, + func, + raw=False, + result_type=None, + args=args, + kwargs=kwargs, + ) + + def apply(self): + raise NotImplementedError + + def transform(self): + raise NotImplementedError + + def agg_or_apply_list_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + obj = self.obj + kwargs = self.kwargs + if op_name == "apply": + kwargs = {**kwargs, "by_row": False} + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + if obj._selected_obj.ndim == 1: + # For SeriesGroupBy this matches _obj_with_exclusions + selected_obj = obj._selected_obj + else: + selected_obj = obj._obj_with_exclusions + + # Only set as_index=True on groupby objects, not Window or Resample + # that inherit from this class. + with com.temp_setattr( + obj, "as_index", True, condition=hasattr(obj, "as_index") + ): + keys, results = self.compute_list_like(op_name, selected_obj, kwargs) + result = self.wrap_results_list_like(keys, results) + return result + + def agg_or_apply_dict_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + SeriesGroupBy, + ) + + assert op_name in ["agg", "apply"] + + obj = self.obj + kwargs = {} + if op_name == "apply": + by_row = "_compat" if self.by_row else False + kwargs.update({"by_row": by_row}) + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + selected_obj = obj._selected_obj + selection = obj._selection + + is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) + + # Numba Groupby engine/engine-kwargs passthrough + if is_groupby: + engine = self.kwargs.get("engine", None) + engine_kwargs = self.kwargs.get("engine_kwargs", None) + kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs}) + + with com.temp_setattr( + obj, "as_index", True, condition=hasattr(obj, "as_index") + ): + result_index, result_data = self.compute_dict_like( + op_name, selected_obj, selection, kwargs + ) + result = self.wrap_results_dict_like(selected_obj, result_index, result_data) + return result + + +class ResamplerWindowApply(GroupByApply): + axis: AxisInt = 0 + obj: Resampler | BaseWindow + + def __init__( + self, + obj: Resampler | BaseWindow, + func: AggFuncType, + *, + args, + kwargs, + ) -> None: + super(GroupByApply, self).__init__( + obj, + func, + raw=False, + result_type=None, + args=args, + kwargs=kwargs, + ) + + def apply(self): + raise NotImplementedError + + def transform(self): + raise NotImplementedError + + +def reconstruct_func( + func: AggFuncType | None, **kwargs +) -> tuple[bool, 
AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]: + """ + This is the internal function to reconstruct func given if there is relabeling + or not and also normalize the keyword to get new order of columns. + + If named aggregation is applied, `func` will be None, and kwargs contains the + column and aggregation function information to be parsed; + If named aggregation is not applied, `func` is either string (e.g. 'min') or + Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name + and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}) + + If relabeling is True, will return relabeling, reconstructed func, column + names, and the reconstructed order of columns. + If relabeling is False, the columns and order will be None. + + Parameters + ---------- + func: agg function (e.g. 'min' or Callable) or list of agg functions + (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}). + **kwargs: dict, kwargs used in is_multi_agg_with_relabel and + normalize_keyword_aggregation function for relabelling + + Returns + ------- + relabelling: bool, if there is relabelling or not + func: normalized and mangled func + columns: tuple of column names + order: array of columns indices + + Examples + -------- + >>> reconstruct_func(None, **{"foo": ("col", "min")}) + (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0])) + + >>> reconstruct_func("min") + (False, 'min', None, None) + """ + relabeling = func is None and is_multi_agg_with_relabel(**kwargs) + columns: tuple[str, ...] | None = None + order: npt.NDArray[np.intp] | None = None + + if not relabeling: + if isinstance(func, list) and len(func) > len(set(func)): + # GH 28426 will raise error if duplicated function names are used and + # there is no reassigned name + raise SpecificationError( + "Function names must be unique if there is no new column names " + "assigned" + ) + if func is None: + # nicer error message + raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") + + if relabeling: + # error: Incompatible types in assignment (expression has type + # "MutableMapping[Hashable, list[Callable[..., Any] | str]]", variable has type + # "Callable[..., Any] | str | list[Callable[..., Any] | str] | + # MutableMapping[Hashable, Callable[..., Any] | str | list[Callable[..., Any] | + # str]] | None") + func, columns, order = normalize_keyword_aggregation( # type: ignore[assignment] + kwargs + ) + assert func is not None + + return relabeling, func, columns, order + + +def is_multi_agg_with_relabel(**kwargs) -> bool: + """ + Check whether kwargs passed to .agg look like multi-agg with relabeling. + + Parameters + ---------- + **kwargs : dict + + Returns + ------- + bool + + Examples + -------- + >>> is_multi_agg_with_relabel(a="max") + False + >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min")) + True + >>> is_multi_agg_with_relabel() + False + """ + return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and ( + len(kwargs) > 0 + ) + + +def normalize_keyword_aggregation( + kwargs: dict, +) -> tuple[ + MutableMapping[Hashable, list[AggFuncTypeBase]], + tuple[str, ...], + npt.NDArray[np.intp], +]: + """ + Normalize user-provided "named aggregation" kwargs. + Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs + to the old Dict[str, List[scalar]]]. + + Parameters + ---------- + kwargs : dict + + Returns + ------- + aggspec : dict + The transformed kwargs. + columns : tuple[str, ...]
+
+
+def is_multi_agg_with_relabel(**kwargs) -> bool:
+    """
+    Check whether kwargs passed to .agg look like multi-agg with relabeling.
+
+    Parameters
+    ----------
+    **kwargs : dict
+
+    Returns
+    -------
+    bool
+
+    Examples
+    --------
+    >>> is_multi_agg_with_relabel(a="max")
+    False
+    >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
+    True
+    >>> is_multi_agg_with_relabel()
+    False
+    """
+    return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
+        len(kwargs) > 0
+    )
+
+
+def normalize_keyword_aggregation(
+    kwargs: dict,
+) -> tuple[
+    MutableMapping[Hashable, list[AggFuncTypeBase]],
+    tuple[str, ...],
+    npt.NDArray[np.intp],
+]:
+    """
+    Normalize user-provided "named aggregation" kwargs.
+    Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
+    to the old Dict[str, List[scalar]].
+
+    Parameters
+    ----------
+    kwargs : dict
+
+    Returns
+    -------
+    aggspec : dict
+        The transformed kwargs.
+    columns : tuple[str, ...]
+        The user-provided keys.
+    col_idx_order : List[int]
+        List of columns indices.
+
+    Examples
+    --------
+    >>> normalize_keyword_aggregation({"output": ("input", "sum")})
+    (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
+    """
+    from pandas.core.indexes.base import Index
+
+    # Normalize the aggregation functions as Mapping[column, List[func]],
+    # process normally, then fixup the names.
+    # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
+    aggspec = defaultdict(list)
+    order = []
+    columns, pairs = list(zip(*kwargs.items()))
+
+    for column, aggfunc in pairs:
+        aggspec[column].append(aggfunc)
+        order.append((column, com.get_callable_name(aggfunc) or aggfunc))
+
+    # uniquify aggfunc name if duplicated in order list
+    uniquified_order = _make_unique_kwarg_list(order)
+
+    # GH 25719: because aggspec changes the order of assigned columns in aggregation,
+    # uniquified_aggspec stores the uniquified order list and compares it with order
+    # based on index
+    aggspec_order = [
+        (column, com.get_callable_name(aggfunc) or aggfunc)
+        for column, aggfuncs in aggspec.items()
+        for aggfunc in aggfuncs
+    ]
+    uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
+
+    # get the new index of columns by comparison
+    col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
+    return aggspec, columns, col_idx_order
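+
+
+# Editorial note: an illustrative sketch (not part of the pandas module). Two named
+# aggregations over the same column are collected into one list per column, and
+# ``col_idx_order`` records how flattened results map back onto the output names.
+#
+#   >>> normalize_keyword_aggregation({"foo": ("col", "min"), "bar": ("col", "max")})
+#   (defaultdict(<class 'list'>, {'col': ['min', 'max']}), ('foo', 'bar'), array([0, 1]))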
+
+
+def _make_unique_kwarg_list(
+    seq: Sequence[tuple[Any, Any]]
+) -> Sequence[tuple[Any, Any]]:
+    """
+    Uniquify aggfunc name of the pairs in the order list
+
+    Examples:
+    --------
+    >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
+    >>> _make_unique_kwarg_list(kwarg_list)
+    [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
+    """
+    return [
+        (pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
+        for i, pair in enumerate(seq)
+    ]
+
+
+def relabel_result(
+    result: DataFrame | Series,
+    func: dict[str, list[Callable | str]],
+    columns: Iterable[Hashable],
+    order: Iterable[int],
+) -> dict[Hashable, Series]:
+    """
+    Internal function to reorder result if relabelling is True for
+    dataframe.agg, and return the reordered result in dict.
+
+    Parameters:
+    ----------
+    result: Result from aggregation
+    func: Dict of (column name, funcs)
+    columns: New columns name for relabelling
+    order: New order for relabelling
+
+    Examples
+    --------
+    >>> from pandas.core.apply import relabel_result
+    >>> result = pd.DataFrame(
+    ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
+    ...     index=["max", "mean", "min"]
+    ... )
+    >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
+    >>> columns = ("foo", "aab", "bar", "dat")
+    >>> order = [0, 1, 2, 3]
+    >>> result_in_dict = relabel_result(result, funcs, columns, order)
+    >>> pd.DataFrame(result_in_dict, index=columns)
+           A    C    B
+    foo  2.0  NaN  NaN
+    aab  NaN  6.0  NaN
+    bar  NaN  NaN  4.0
+    dat  NaN  NaN  2.5
+    """
+    from pandas.core.indexes.base import Index
+
+    reordered_indexes = [
+        pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
+    ]
+    reordered_result_in_dict: dict[Hashable, Series] = {}
+    idx = 0
+
+    reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
+    for col, fun in func.items():
+        s = result[col].dropna()
+
+        # In the `_aggregate`, the callable names are obtained and used in `result`, and
+        # these names are ordered alphabetically. e.g.
+        #           C2   C1
+        # <lambda>   1  NaN
+        # amax     NaN  4.0
+        # max      NaN  4.0
+        # sum     18.0  6.0
+        # Therefore, the order of functions for each column could be shuffled
+        # accordingly so need to get the callable name if it is not parsed names, and
+        # reorder the aggregated result for each column.
+        # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
+        # [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
+        # reorder so that aggregated values map to their functions regarding the order.
+
+        # However, if there is only one column being used for aggregation, there is no
+        # need to reorder since the index is not sorted, and keep as is in `funcs`, e.g.
+        #         A
+        # min   1.0
+        # mean  1.5
+        # mean  1.5
+        if reorder_mask:
+            fun = [
+                com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
+            ]
+            col_idx_order = Index(s.index).get_indexer(fun)
+            s = s.iloc[col_idx_order]
+
+        # assign the new user-provided "named aggregation" as index names, and reindex
+        # it based on the whole user-provided names.
+        s.index = reordered_indexes[idx : idx + len(fun)]
+        reordered_result_in_dict[col] = s.reindex(columns, copy=False)
+        idx = idx + len(fun)
+    return reordered_result_in_dict
+
+
+def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
+    from pandas import DataFrame
+
+    relabeling, func, columns, order = reconstruct_func(func, **kwargs)
+
+    if relabeling:
+        # This is to keep the order of columns occurrence unchanged, and also
+        # keep the order of new columns occurrence unchanged
+
+        # For the return values of reconstruct_func, if relabeling is
+        # False, columns and order will be None.
+        assert columns is not None
+        assert order is not None
+
+        result_in_dict = relabel_result(result, func, columns, order)
+        result = DataFrame(result_in_dict, index=columns)
+
+    return result
+
+
+# TODO: Can't use, because mypy doesn't like us setting __name__
+#   error: "partial[Any]" has no attribute "__name__"
+# the type is:
+#   typing.Sequence[Callable[..., ScalarResult]]
+#     -> typing.Sequence[Callable[..., ScalarResult]]:
+
+
+def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
+    """
+    Possibly mangle a list of aggfuncs.
+
+    Parameters
+    ----------
+    aggfuncs : Sequence
+
+    Returns
+    -------
+    mangled: list-like
+        A new AggSpec sequence, where lambdas have been converted
+        to have unique names.
+
+    Notes
+    -----
+    If just one aggfunc is passed, the name will not be mangled.
+    """
+    if len(aggfuncs) <= 1:
+        # don't mangle for .agg([lambda x: .])
+        return aggfuncs
+    i = 0
+    mangled_aggfuncs = []
+    for aggfunc in aggfuncs:
+        if com.get_callable_name(aggfunc) == "<lambda>":
+            aggfunc = partial(aggfunc)
+            aggfunc.__name__ = f"<lambda_{i}>"
+            i += 1
+        mangled_aggfuncs.append(aggfunc)
+
+    return mangled_aggfuncs
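+
+
+# Editorial note: an illustrative sketch (not part of the pandas module). With two
+# lambdas in one list, each gets a unique mangled name so their aggregation results
+# can be told apart.
+#
+#   >>> funcs = _managle_lambda_list([lambda x: x.min(), lambda x: x.max()])
+#   >>> [f.__name__ for f in funcs]
+#   ['<lambda_0>', '<lambda_1>']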
+
+
+def maybe_mangle_lambdas(agg_spec: Any) -> Any:
+    """
+    Make new lambdas with unique names.
+
+    Parameters
+    ----------
+    agg_spec : Any
+        An argument to GroupBy.agg.
+        Non-dict-like `agg_spec` are passed through as is.
+        For dict-like `agg_spec` a new spec is returned
+        with name-mangled lambdas.
+
+    Returns
+    -------
+    mangled : Any
+        Same type as the input.
+
+    Examples
+    --------
+    >>> maybe_mangle_lambdas('sum')
+    'sum'
+    >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
+    [<function __main__.<lambda_0>()>,
+     <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
+    """
+    is_dict = is_dict_like(agg_spec)
+    if not (is_dict or is_list_like(agg_spec)):
+        return agg_spec
+    mangled_aggspec = type(agg_spec)()  # dict or OrderedDict
+
+    if is_dict:
+        for key, aggfuncs in agg_spec.items():
+            if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
+                mangled_aggfuncs = _managle_lambda_list(aggfuncs)
+            else:
+                mangled_aggfuncs = aggfuncs
+
+            mangled_aggspec[key] = mangled_aggfuncs
+    else:
+        mangled_aggspec = _managle_lambda_list(agg_spec)
+
+    return mangled_aggspec
+
+
+def validate_func_kwargs(
+    kwargs: dict,
+) -> tuple[list[str], list[str | Callable[..., Any]]]:
+    """
+    Validates types of user-provided "named aggregation" kwargs.
+    `TypeError` is raised if aggfunc is not `str` or callable.
+
+    Parameters
+    ----------
+    kwargs : dict
+
+    Returns
+    -------
+    columns : List[str]
+        List of user-provided keys.
+    func : List[Union[str, callable[...,Any]]]
+        List of user-provided aggfuncs
+
+    Examples
+    --------
+    >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
+    (['one', 'two'], ['min', 'max'])
+    """
+    tuple_given_message = "func is expected but received {} in **kwargs."
+    columns = list(kwargs)
+    func = []
+    for col_func in kwargs.values():
+        if not (isinstance(col_func, str) or callable(col_func)):
+            raise TypeError(tuple_given_message.format(type(col_func).__name__))
+        func.append(col_func)
+    if not columns:
+        no_arg_message = "Must provide 'func' or named aggregation **kwargs."
+        raise TypeError(no_arg_message)
+    return columns, func
+
+
+def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
+    return isinstance(colg, ABCDataFrame) or (
+        isinstance(colg, ABCSeries) and op_name == "agg"
+    )
+
+
+def warn_alias_replacement(
+    obj: AggObjType,
+    func: Callable,
+    alias: str,
+) -> None:
+    if alias.startswith("np."):
+        full_alias = alias
+    else:
+        full_alias = f"{type(obj).__name__}.{alias}"
+        alias = f'"{alias}"'
+    warnings.warn(
+        f"The provided callable {func} is currently using "
+        f"{full_alias}. In a future version of pandas, "
+        f"the provided callable will be used directly. To keep current "
+        f"behavior pass the string {alias} instead.",
+        category=FutureWarning,
+        stacklevel=find_stack_level(),
+    )
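+
+
+# Editorial note: an illustrative sketch (not part of the pandas module).
+# ``validate_func_kwargs`` backs the Series flavor of named aggregation, where
+# kwargs map output names directly to aggfuncs rather than to (column, aggfunc)
+# tuples. The toy Series below is invented for the example.
+#
+#   >>> import pandas as pd
+#   >>> s = pd.Series([1, 2, 3, 4], index=["x", "x", "y", "y"])
+#   >>> s.groupby(level=0).agg(lo="min", hi="max")
+#      lo  hi
+#   x   1   2
+#   y   3   4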
+""" diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py new file mode 100644 index 0000000000000000000000000000000000000000..825fe60ee6cf88a2186a5f501c8696cecaf2657d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py @@ -0,0 +1,67 @@ +""" +datetimelke_accumulations.py is for accumulations of datetimelike extension arrays +""" + +from __future__ import annotations + +from typing import Callable + +import numpy as np + +from pandas._libs import iNaT + +from pandas.core.dtypes.missing import isna + + +def _cum_func( + func: Callable, + values: np.ndarray, + *, + skipna: bool = True, +): + """ + Accumulations for 1D datetimelike arrays. + + Parameters + ---------- + func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). Values is changed is modified inplace. + skipna : bool, default True + Whether to skip NA. + """ + try: + fill_value = { + np.maximum.accumulate: np.iinfo(np.int64).min, + np.cumsum: 0, + np.minimum.accumulate: np.iinfo(np.int64).max, + }[func] + except KeyError: + raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray") + + mask = isna(values) + y = values.view("i8") + y[mask] = fill_value + + if not skipna: + mask = np.maximum.accumulate(mask) + + result = func(y) + result[mask] = iNaT + + if values.dtype.kind in "mM": + return result.view(values.dtype.base) + return result + + +def cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray: + return _cum_func(np.cumsum, values, skipna=skipna) + + +def cummin(values: np.ndarray, *, skipna: bool = True): + return _cum_func(np.minimum.accumulate, values, skipna=skipna) + + +def cummax(values: np.ndarray, *, skipna: bool = True): + return _cum_func(np.maximum.accumulate, values, skipna=skipna) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py new file mode 100644 index 0000000000000000000000000000000000000000..ad9e96d398a242dc64de2018b749fd2dbca7ed78 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py @@ -0,0 +1,90 @@ +""" +masked_accumulations.py is for accumulation algorithms using a mask-based approach +for missing values. +""" + +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Callable, +) + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +def _cum_func( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, +): + """ + Accumulations for 1D masked array. + + We will modify values in place to replace NAs with the appropriate fill value. + + Parameters + ---------- + func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. 
+ """ + dtype_info: np.iinfo | np.finfo + if values.dtype.kind == "f": + dtype_info = np.finfo(values.dtype.type) + elif values.dtype.kind in "iu": + dtype_info = np.iinfo(values.dtype.type) + elif values.dtype.kind == "b": + # Max value of bool is 1, but since we are setting into a boolean + # array, 255 is fine as well. Min value has to be 0 when setting + # into the boolean array. + dtype_info = np.iinfo(np.uint8) + else: + raise NotImplementedError( + f"No masked accumulation defined for dtype {values.dtype.type}" + ) + try: + fill_value = { + np.cumprod: 1, + np.maximum.accumulate: dtype_info.min, + np.cumsum: 0, + np.minimum.accumulate: dtype_info.max, + }[func] + except KeyError: + raise NotImplementedError( + f"No accumulation for {func} implemented on BaseMaskedArray" + ) + + values[mask] = fill_value + + if not skipna: + mask = np.maximum.accumulate(mask) + + values = func(values) + return values, mask + + +def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumsum, values, mask, skipna=skipna) + + +def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumprod, values, mask, skipna=skipna) + + +def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna) + + +def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py new file mode 100644 index 0000000000000000000000000000000000000000..f65d2d20e028e36b35a397d8ac973f184ce1412c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py @@ -0,0 +1,149 @@ +""" +EA-compatible analogue to np.putmask +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.cast import infer_dtype_from +from pandas.core.dtypes.common import is_list_like + +from pandas.core.arrays import ExtensionArray + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + npt, + ) + + from pandas import MultiIndex + + +def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None: + """ + ExtensionArray-compatible implementation of np.putmask. The main + difference is we do not handle repeating or truncating like numpy. + + Parameters + ---------- + values: np.ndarray or ExtensionArray + mask : np.ndarray[bool] + We assume extract_bool_array has already been called. 
+    value : Any
+    """
+
+    if (
+        not isinstance(values, np.ndarray)
+        or (values.dtype == object and not lib.is_scalar(value))
+        # GH#43424: np.putmask raises TypeError if we cannot cast between types with
+        # rule = "safe", a stricter guarantee we may not have here
+        or (
+            isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)
+        )
+    ):
+        # GH#19266 using np.putmask gives unexpected results with listlike value
+        # along with object dtype
+        if is_list_like(value) and len(value) == len(values):
+            values[mask] = value[mask]
+        else:
+            values[mask] = value
+    else:
+        # GH#37833 np.putmask is more performant than __setitem__
+        np.putmask(values, mask, value)
+
+
+def putmask_without_repeat(
+    values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any
+) -> None:
+    """
+    np.putmask will truncate or repeat if `new` is a listlike with
+    len(new) != len(values). We require an exact match.
+
+    Parameters
+    ----------
+    values : np.ndarray
+    mask : np.ndarray[bool]
+    new : Any
+    """
+    if getattr(new, "ndim", 0) >= 1:
+        new = new.astype(values.dtype, copy=False)
+
+    # TODO: this prob needs some better checking for 2D cases
+    nlocs = mask.sum()
+    if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
+        shape = np.shape(new)
+        # np.shape compat for if setitem_datetimelike_compat
+        # changed arraylike to list e.g. test_where_dt64_2d
+        if nlocs == shape[-1]:
+            # GH#30567
+            # If length of ``new`` is less than the length of ``values``,
+            # `np.putmask` would first repeat the ``new`` array and then
+            # assign the masked values hence produces incorrect result.
+            # `np.place` on the other hand uses the ``new`` values as it is
+            # to place in the masked locations of ``values``
+            np.place(values, mask, new)
+            # i.e. values[mask] = new
+        elif mask.shape[-1] == shape[-1] or shape[-1] == 1:
+            np.putmask(values, mask, new)
+        else:
+            raise ValueError("cannot assign mismatch length to masked array")
+    else:
+        np.putmask(values, mask, new)
+
+
+def validate_putmask(
+    values: ArrayLike | MultiIndex, mask: np.ndarray
+) -> tuple[npt.NDArray[np.bool_], bool]:
+    """
+    Validate mask and check if this putmask operation is a no-op.
+    """
+    mask = extract_bool_array(mask)
+    if mask.shape != values.shape:
+        raise ValueError("putmask: mask and data must be the same size")
+
+    noop = not mask.any()
+    return mask, noop
+
+
+def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]:
+    """
+    If we have a SparseArray or BooleanArray, convert it to ndarray[bool].
+    """
+    if isinstance(mask, ExtensionArray):
+        # We could have BooleanArray, Sparse[bool], ...
+ # Except for BooleanArray, this is equivalent to just + # np.asarray(mask, dtype=bool) + mask = mask.to_numpy(dtype=bool, na_value=False) + + mask = np.asarray(mask, dtype=bool) + return mask + + +def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other): + """ + Parameters + ---------- + values : np.ndarray + num_set : int + For putmask, this is mask.sum() + other : Any + """ + if values.dtype == object: + dtype, _ = infer_dtype_from(other) + + if lib.is_np_dtype(dtype, "mM"): + # https://github.com/numpy/numpy/issues/12550 + # timedelta64 will incorrectly cast to int + if not is_list_like(other): + other = [other] * num_set + else: + other = list(other) + + return other diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..5c933294fb944f04dd3e9a64e4731ea4349254f9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py @@ -0,0 +1,226 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Scalar, + npt, + ) + + +def quantile_compat( + values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str +) -> ArrayLike: + """ + Compute the quantiles of the given values for each quantile in `qs`. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + qs : np.ndarray[float64] + interpolation : str + + Returns + ------- + np.ndarray or ExtensionArray + """ + if isinstance(values, np.ndarray): + fill_value = na_value_for_dtype(values.dtype, compat=False) + mask = isna(values) + return quantile_with_mask(values, mask, fill_value, qs, interpolation) + else: + return values._quantile(qs, interpolation) + + +def quantile_with_mask( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + fill_value, + qs: npt.NDArray[np.float64], + interpolation: str, +) -> np.ndarray: + """ + Compute the quantiles of the given values for each quantile in `qs`. + + Parameters + ---------- + values : np.ndarray + For ExtensionArray, this is _values_for_factorize()[0] + mask : np.ndarray[bool] + mask = isna(values) + For ExtensionArray, this is computed before calling _value_for_factorize + fill_value : Scalar + The value to interpret fill NA entries with + For ExtensionArray, this is _values_for_factorize()[1] + qs : np.ndarray[float64] + interpolation : str + Type of interpolation + + Returns + ------- + np.ndarray + + Notes + ----- + Assumes values is already 2D. For ExtensionArray this means np.atleast_2d + has been called on _values_for_factorize()[0] + + Quantile is computed along axis=1. 
+ """ + assert values.shape == mask.shape + if values.ndim == 1: + # unsqueeze, operate, re-squeeze + values = np.atleast_2d(values) + mask = np.atleast_2d(mask) + res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation) + return res_values[0] + + assert values.ndim == 2 + + is_empty = values.shape[1] == 0 + + if is_empty: + # create the array of na_values + # 2d len(values) * len(qs) + flat = np.array([fill_value] * len(qs)) + result = np.repeat(flat, len(values)).reshape(len(values), len(qs)) + else: + result = _nanpercentile( + values, + qs * 100.0, + na_value=fill_value, + mask=mask, + interpolation=interpolation, + ) + + result = np.asarray(result) + result = result.T + + return result + + +def _nanpercentile_1d( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + qs: npt.NDArray[np.float64], + na_value: Scalar, + interpolation: str, +) -> Scalar | np.ndarray: + """ + Wrapper for np.percentile that skips missing values, specialized to + 1-dimensional case. + + Parameters + ---------- + values : array over which to find quantiles + mask : ndarray[bool] + locations in values that should be considered missing + qs : np.ndarray[float64] of quantile indices to find + na_value : scalar + value to return for empty or all-null values + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + # mask is Union[ExtensionArray, ndarray] + values = values[~mask] + + if len(values) == 0: + # Can't pass dtype=values.dtype here bc we might have na_value=np.nan + # with values.dtype=int64 see test_quantile_empty + # equiv: 'np.array([na_value] * len(qs))' but much faster + return np.full(len(qs), na_value) + + return np.percentile( + values, + qs, + # error: No overload variant of "percentile" matches argument + # types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]" + # , "Dict[str, str]" [call-overload] + method=interpolation, # type: ignore[call-overload] + ) + + +def _nanpercentile( + values: np.ndarray, + qs: npt.NDArray[np.float64], + *, + na_value, + mask: npt.NDArray[np.bool_], + interpolation: str, +): + """ + Wrapper for np.percentile that skips missing values. + + Parameters + ---------- + values : np.ndarray[ndim=2] over which to find quantiles + qs : np.ndarray[float64] of quantile indices to find + na_value : scalar + value to return for empty or all-null values + mask : np.ndarray[bool] + locations in values that should be considered missing + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + + if values.dtype.kind in "mM": + # need to cast to integer to avoid rounding errors in numpy + result = _nanpercentile( + values.view("i8"), + qs=qs, + na_value=na_value.view("i8"), + mask=mask, + interpolation=interpolation, + ) + + # Note: we have to do `astype` and not view because in general we + # have float result at this point, not i8 + return result.astype(values.dtype) + + if mask.any(): + # Caller is responsible for ensuring mask shape match + assert mask.shape == values.shape + result = [ + _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation) + for (val, m) in zip(list(values), list(mask)) + ] + if values.dtype.kind == "f": + # preserve itemsize + result = np.asarray(result, dtype=values.dtype).T + else: + result = np.asarray(result).T + if ( + result.dtype != values.dtype + and not mask.all() + and (result == result.astype(values.dtype, copy=False)).all() + ): + # mask.all() will never get cast back to int + # e.g. 
values is integer dtype and result is floating dtype,
+            # only cast back to integer dtype if result values are all-integer.
+            result = result.astype(values.dtype, copy=False)
+        return result
+    else:
+        return np.percentile(
+            values,
+            qs,
+            axis=1,
+            # error: No overload variant of "percentile" matches argument types
+            # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]",
+            # "int", "Dict[str, str]" [call-overload]
+            method=interpolation,  # type: ignore[call-overload]
+        )
diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/replace.py b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/replace.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f377276be480ec4d01c8cd1671fa95f9504c7c6
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/replace.py
@@ -0,0 +1,152 @@
+"""
+Methods used by Block.replace and related methods.
+"""
+from __future__ import annotations
+
+import operator
+import re
+from re import Pattern
+from typing import (
+    TYPE_CHECKING,
+    Any,
+)
+
+import numpy as np
+
+from pandas.core.dtypes.common import (
+    is_bool,
+    is_re,
+    is_re_compilable,
+)
+from pandas.core.dtypes.missing import isna
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        Scalar,
+        npt,
+    )
+
+
+def should_use_regex(regex: bool, to_replace: Any) -> bool:
+    """
+    Decide whether to treat `to_replace` as a regular expression.
+    """
+    if is_re(to_replace):
+        regex = True
+
+    regex = regex and is_re_compilable(to_replace)
+
+    # Don't use regex if the pattern is empty.
+    regex = regex and re.compile(to_replace).pattern != ""
+    return regex
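+
+
+# Editorial note: an illustrative sketch (not part of the pandas module). Compiled
+# patterns force the regex path, while plain strings respect the ``regex`` flag.
+#
+#   >>> import re
+#   >>> should_use_regex(False, re.compile(r"\d+"))
+#   True
+#   >>> should_use_regex(False, "abc")
+#   False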
+
+
+def compare_or_regex_search(
+    a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
+) -> ArrayLike:
+    """
+    Compare two array-like inputs of the same shape or two scalar values
+
+    Calls operator.eq or re.search, depending on regex argument. If regex is
+    True, perform an element-wise regex matching.
+
+    Parameters
+    ----------
+    a : array-like
+    b : scalar or regex pattern
+    regex : bool
+    mask : np.ndarray[bool]
+
+    Returns
+    -------
+    mask : array-like of bool
+    """
+    if isna(b):
+        return ~mask
+
+    def _check_comparison_types(
+        result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
+    ):
+        """
+        Raises an error if the two arrays (a,b) cannot be compared.
+        Otherwise, returns the comparison result as expected.
+        """
+        if is_bool(result) and isinstance(a, np.ndarray):
+            type_names = [type(a).__name__, type(b).__name__]
+
+            type_names[0] = f"ndarray(dtype={a.dtype})"
+
+            raise TypeError(
+                f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
+            )
+
+    if not regex or not should_use_regex(regex, b):
+        # TODO: should use missing.mask_missing?
+        op = lambda x: operator.eq(x, b)
+    else:
+        op = np.vectorize(
+            lambda x: bool(re.search(b, x))
+            if isinstance(x, str) and isinstance(b, (str, Pattern))
+            else False
+        )
+
+    # GH#32621 use mask to avoid comparing to NAs
+    if isinstance(a, np.ndarray):
+        a = a[mask]
+
+    result = op(a)
+
+    if isinstance(result, np.ndarray) and mask is not None:
+        # The shape of the mask can differ from that of the result
+        # since we may compare only a subset of a's or b's elements
+        tmp = np.zeros(mask.shape, dtype=np.bool_)
+        np.place(tmp, mask, result)
+        result = tmp
+
+    _check_comparison_types(result, a, b)
+    return result
+
+
+def replace_regex(
+    values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
+) -> None:
+    """
+    Parameters
+    ----------
+    values : ArrayLike
+        Object dtype.
+    rx : re.Pattern
+    value : Any
+    mask : np.ndarray[bool], optional
+
+    Notes
+    -----
+    Alters values in-place.
+    """
+
+    # deal with replacing values with objects (strings) that match but
+    # whose replacement is not a string (numeric, nan, object)
+    if isna(value) or not isinstance(value, str):
+
+        def re_replacer(s):
+            if is_re(rx) and isinstance(s, str):
+                return value if rx.search(s) is not None else s
+            else:
+                return s
+
+    else:
+        # value is guaranteed to be a string here, s can be either a string
+        # or null if it's null it gets returned
+        def re_replacer(s):
+            if is_re(rx) and isinstance(s, str):
+                return rx.sub(value, s)
+            else:
+                return s
+
+    f = np.vectorize(re_replacer, otypes=[np.object_])
+
+    if mask is None:
+        values[:] = f(values)
+    else:
+        values[mask] = f(values[mask])
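+
+
+# Editorial note: an illustrative sketch (not part of the pandas module). In-place
+# regex replacement over an object-dtype array, with no mask supplied.
+#
+#   >>> import re
+#   >>> import numpy as np
+#   >>> arr = np.array(["foo-1", "bar-2"], dtype=object)
+#   >>> replace_regex(arr, re.compile(r"-\d"), "", mask=None)
+#   >>> arr
+#   array(['foo', 'bar'], dtype=object)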
diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/take.py b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/take.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac674e31586e72040a8e1313f232a00299b961ee
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/take.py
@@ -0,0 +1,594 @@
+from __future__ import annotations
+
+import functools
+from typing import (
+    TYPE_CHECKING,
+    cast,
+    overload,
+)
+
+import numpy as np
+
+from pandas._libs import (
+    algos as libalgos,
+    lib,
+)
+
+from pandas.core.dtypes.cast import maybe_promote
+from pandas.core.dtypes.common import (
+    ensure_platform_int,
+    is_1d_only_ea_dtype,
+)
+from pandas.core.dtypes.missing import na_value_for_dtype
+
+from pandas.core.construction import ensure_wrapped_if_datetimelike
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        AxisInt,
+        npt,
+    )
+
+    from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+    from pandas.core.arrays.base import ExtensionArray
+
+
+@overload
+def take_nd(
+    arr: np.ndarray,
+    indexer,
+    axis: AxisInt = ...,
+    fill_value=...,
+    allow_fill: bool = ...,
+) -> np.ndarray:
+    ...
+
+
+@overload
+def take_nd(
+    arr: ExtensionArray,
+    indexer,
+    axis: AxisInt = ...,
+    fill_value=...,
+    allow_fill: bool = ...,
+) -> ArrayLike:
+    ...
+
+
+def take_nd(
+    arr: ArrayLike,
+    indexer,
+    axis: AxisInt = 0,
+    fill_value=lib.no_default,
+    allow_fill: bool = True,
+) -> ArrayLike:
+    """
+    Specialized Cython take which sets NaN values in one pass
+
+    This dispatches to ``take`` defined on ExtensionArrays.
+
+    Note: this function assumes that the indexer is a valid(ated) indexer with
+    no out of bound indices.
+
+    Parameters
+    ----------
+    arr : np.ndarray or ExtensionArray
+        Input array.
+    indexer : ndarray
+        1-D array of indices to take, subarrays corresponding to -1 value
+        indices are filled with fill_value
+    axis : int, default 0
+        Axis to take from
+    fill_value : any, default np.nan
+        Fill value to replace -1 values with
+    allow_fill : bool, default True
+        If False, indexer is assumed to contain no -1 values so no filling
+        will be done. This short-circuits computation of a mask. Result is
+        undefined if allow_fill == False and -1 is present in indexer.
+
+    Returns
+    -------
+    subarray : np.ndarray or ExtensionArray
+        May be the same type as the input, or cast to an ndarray.
+    """
+    if fill_value is lib.no_default:
+        fill_value = na_value_for_dtype(arr.dtype, compat=False)
+    elif lib.is_np_dtype(arr.dtype, "mM"):
+        dtype, fill_value = maybe_promote(arr.dtype, fill_value)
+        if arr.dtype != dtype:
+            # EA.take is strict about returning a new object of the same type
+            # so for that case cast upfront
+            arr = arr.astype(dtype)
+
+    if not isinstance(arr, np.ndarray):
+        # i.e. ExtensionArray,
+        # includes for EA to catch DatetimeArray, TimedeltaArray
+        if not is_1d_only_ea_dtype(arr.dtype):
+            # i.e. DatetimeArray, TimedeltaArray
+            arr = cast("NDArrayBackedExtensionArray", arr)
+            return arr.take(
+                indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis
+            )
+
+        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
+
+    arr = np.asarray(arr)
+    return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
+
+
+def _take_nd_ndarray(
+    arr: np.ndarray,
+    indexer: npt.NDArray[np.intp] | None,
+    axis: AxisInt,
+    fill_value,
+    allow_fill: bool,
+) -> np.ndarray:
+    if indexer is None:
+        indexer = np.arange(arr.shape[axis], dtype=np.intp)
+        dtype, fill_value = arr.dtype, arr.dtype.type()
+    else:
+        indexer = ensure_platform_int(indexer)
+
+    dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
+        arr, indexer, fill_value, allow_fill
+    )
+
+    flip_order = False
+    if arr.ndim == 2 and arr.flags.f_contiguous:
+        flip_order = True
+
+    if flip_order:
+        arr = arr.T
+        axis = arr.ndim - axis - 1
+
+    # at this point, it's guaranteed that dtype can hold both the arr values
+    # and the fill_value
+    out_shape_ = list(arr.shape)
+    out_shape_[axis] = len(indexer)
+    out_shape = tuple(out_shape_)
+    if arr.flags.f_contiguous and axis == arr.ndim - 1:
+        # minor tweak that can make an order-of-magnitude difference
+        # for dataframes initialized directly from 2-d ndarrays
+        # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
+        # f-contiguous transpose)
+        out = np.empty(out_shape, dtype=dtype, order="F")
+    else:
+        out = np.empty(out_shape, dtype=dtype)
+
+    func = _get_take_nd_function(
+        arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
+    )
+    func(arr, indexer, out, fill_value)
+
+    if flip_order:
+        out = out.T
+    return out
+
+
+def take_1d(
+    arr: ArrayLike,
+    indexer: npt.NDArray[np.intp],
+    fill_value=None,
+    allow_fill: bool = True,
+    mask: npt.NDArray[np.bool_] | None = None,
+) -> ArrayLike:
+    """
+    Specialized version for 1D arrays. Differences compared to `take_nd`:
+
+    - Assumes input array has already been converted to numpy array / EA
+    - Assumes indexer is already guaranteed to be intp dtype ndarray
+    - Only works for 1D arrays
+
+    To ensure the lowest possible overhead.
+
+    Note: similarly to `take_nd`, this function assumes that the indexer is
+    a valid(ated) indexer with no out of bound indices.
+
+    Parameters
+    ----------
+    arr : np.ndarray or ExtensionArray
+        Input array.
+ indexer : ndarray + 1-D array of indices to take (validated indices, intp dtype). + fill_value : any, default np.nan + Fill value to replace -1 values with + allow_fill : bool, default True + If False, indexer is assumed to contain no -1 values so no filling + will be done. This short-circuits computation of a mask. Result is + undefined if allow_fill == False and -1 is present in indexer. + mask : np.ndarray, optional, default None + If `allow_fill` is True, and the mask (where indexer == -1) is already + known, it can be passed to avoid recomputation. + """ + if not isinstance(arr, np.ndarray): + # ExtensionArray -> dispatch to their method + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + + if not allow_fill: + return arr.take(indexer) + + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + arr, indexer, fill_value, True, mask + ) + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out = np.empty(indexer.shape, dtype=dtype) + + func = _get_take_nd_function( + arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info + ) + func(arr, indexer, out, fill_value) + + return out + + +def take_2d_multi( + arr: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + fill_value=np.nan, +) -> np.ndarray: + """ + Specialized Cython take which sets NaN values in one pass. + """ + # This is only called from one place in DataFrame._reindex_multi, + # so we know indexer is well-behaved. + assert indexer is not None + assert indexer[0] is not None + assert indexer[1] is not None + + row_idx, col_idx = indexer + + row_idx = ensure_platform_int(row_idx) + col_idx = ensure_platform_int(col_idx) + indexer = row_idx, col_idx + mask_info = None + + # check for promotion based on types only (do this first because + # it's faster than computing a mask) + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if dtype != arr.dtype: + # check if promotion is actually required based on indexer + row_mask = row_idx == -1 + col_mask = col_idx == -1 + row_needs = row_mask.any() + col_needs = col_mask.any() + mask_info = (row_mask, col_mask), (row_needs, col_needs) + + if not (row_needs or col_needs): + # if not, then depromote, set fill_value to dummy + # (it won't be used but we don't want the cython code + # to crash when trying to cast it to dtype) + dtype, fill_value = arr.dtype, arr.dtype.type() + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out_shape = len(row_idx), len(col_idx) + out = np.empty(out_shape, dtype=dtype) + + func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) + if func is None and arr.dtype != out.dtype: + func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None) + if func is not None: + func = _convert_wrapper(func, out.dtype) + + if func is not None: + func(arr, indexer, out=out, fill_value=fill_value) + else: + # test_reindex_multi + _take_2d_multi_object( + arr, indexer, out, fill_value=fill_value, mask_info=mask_info + ) + + return out + + +@functools.lru_cache +def _get_take_nd_function_cached( + ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt +): + """ + Part of _get_take_nd_function below that doesn't need `mask_info` and thus + can be cached (mask_info potentially contains a numpy ndarray which is not + hashable and thus cannot be used as argument for cached function). 
+ """ + tup = (arr_dtype.name, out_dtype.name) + if ndim == 1: + func = _take_1d_dict.get(tup, None) + elif ndim == 2: + if axis == 0: + func = _take_2d_axis0_dict.get(tup, None) + else: + func = _take_2d_axis1_dict.get(tup, None) + if func is not None: + return func + + # We get here with string, uint, float16, and complex dtypes that could + # potentially be handled in algos_take_helper. + # Also a couple with (M8[ns], object) and (m8[ns], object) + tup = (out_dtype.name, out_dtype.name) + if ndim == 1: + func = _take_1d_dict.get(tup, None) + elif ndim == 2: + if axis == 0: + func = _take_2d_axis0_dict.get(tup, None) + else: + func = _take_2d_axis1_dict.get(tup, None) + if func is not None: + func = _convert_wrapper(func, out_dtype) + return func + + return None + + +def _get_take_nd_function( + ndim: int, + arr_dtype: np.dtype, + out_dtype: np.dtype, + axis: AxisInt = 0, + mask_info=None, +): + """ + Get the appropriate "take" implementation for the given dimension, axis + and dtypes. + """ + func = None + if ndim <= 2: + # for this part we don't need `mask_info` -> use the cached algo lookup + func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis) + + if func is None: + + def func(arr, indexer, out, fill_value=np.nan) -> None: + indexer = ensure_platform_int(indexer) + _take_nd_object( + arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info + ) + + return func + + +def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None): + def wrapper( + arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan + ) -> None: + if arr_dtype is not None: + arr = arr.view(arr_dtype) + if out_dtype is not None: + out = out.view(out_dtype) + if fill_wrap is not None: + # FIXME: if we get here with dt64/td64 we need to be sure we have + # matching resos + if fill_value.dtype.kind == "m": + fill_value = fill_value.astype("m8[ns]") + else: + fill_value = fill_value.astype("M8[ns]") + fill_value = fill_wrap(fill_value) + + f(arr, indexer, out, fill_value=fill_value) + + return wrapper + + +def _convert_wrapper(f, conv_dtype): + def wrapper( + arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan + ) -> None: + if conv_dtype == object: + # GH#39755 avoid casting dt64/td64 to integers + arr = ensure_wrapped_if_datetimelike(arr) + arr = arr.astype(conv_dtype) + f(arr, indexer, out, fill_value=fill_value) + + return wrapper + + +_take_1d_dict = { + ("int8", "int8"): libalgos.take_1d_int8_int8, + ("int8", "int32"): libalgos.take_1d_int8_int32, + ("int8", "int64"): libalgos.take_1d_int8_int64, + ("int8", "float64"): libalgos.take_1d_int8_float64, + ("int16", "int16"): libalgos.take_1d_int16_int16, + ("int16", "int32"): libalgos.take_1d_int16_int32, + ("int16", "int64"): libalgos.take_1d_int16_int64, + ("int16", "float64"): libalgos.take_1d_int16_float64, + ("int32", "int32"): libalgos.take_1d_int32_int32, + ("int32", "int64"): libalgos.take_1d_int32_int64, + ("int32", "float64"): libalgos.take_1d_int32_float64, + ("int64", "int64"): libalgos.take_1d_int64_int64, + ("int64", "float64"): libalgos.take_1d_int64_float64, + ("float32", "float32"): libalgos.take_1d_float32_float32, + ("float32", "float64"): libalgos.take_1d_float32_float64, + ("float64", "float64"): libalgos.take_1d_float64_float64, + ("object", "object"): libalgos.take_1d_object_object, + ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8), + ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None), + ("datetime64[ns]", 
"datetime64[ns]"): _view_wrapper( + libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 + ), +} + +_take_2d_axis0_dict = { + ("int8", "int8"): libalgos.take_2d_axis0_int8_int8, + ("int8", "int32"): libalgos.take_2d_axis0_int8_int32, + ("int8", "int64"): libalgos.take_2d_axis0_int8_int64, + ("int8", "float64"): libalgos.take_2d_axis0_int8_float64, + ("int16", "int16"): libalgos.take_2d_axis0_int16_int16, + ("int16", "int32"): libalgos.take_2d_axis0_int16_int32, + ("int16", "int64"): libalgos.take_2d_axis0_int16_int64, + ("int16", "float64"): libalgos.take_2d_axis0_int16_float64, + ("int32", "int32"): libalgos.take_2d_axis0_int32_int32, + ("int32", "int64"): libalgos.take_2d_axis0_int32_int64, + ("int32", "float64"): libalgos.take_2d_axis0_int32_float64, + ("int64", "int64"): libalgos.take_2d_axis0_int64_int64, + ("int64", "float64"): libalgos.take_2d_axis0_int64_float64, + ("float32", "float32"): libalgos.take_2d_axis0_float32_float32, + ("float32", "float64"): libalgos.take_2d_axis0_float32_float64, + ("float64", "float64"): libalgos.take_2d_axis0_float64_float64, + ("object", "object"): libalgos.take_2d_axis0_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_axis0_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + +_take_2d_axis1_dict = { + ("int8", "int8"): libalgos.take_2d_axis1_int8_int8, + ("int8", "int32"): libalgos.take_2d_axis1_int8_int32, + ("int8", "int64"): libalgos.take_2d_axis1_int8_int64, + ("int8", "float64"): libalgos.take_2d_axis1_int8_float64, + ("int16", "int16"): libalgos.take_2d_axis1_int16_int16, + ("int16", "int32"): libalgos.take_2d_axis1_int16_int32, + ("int16", "int64"): libalgos.take_2d_axis1_int16_int64, + ("int16", "float64"): libalgos.take_2d_axis1_int16_float64, + ("int32", "int32"): libalgos.take_2d_axis1_int32_int32, + ("int32", "int64"): libalgos.take_2d_axis1_int32_int64, + ("int32", "float64"): libalgos.take_2d_axis1_int32_float64, + ("int64", "int64"): libalgos.take_2d_axis1_int64_int64, + ("int64", "float64"): libalgos.take_2d_axis1_int64_float64, + ("float32", "float32"): libalgos.take_2d_axis1_float32_float32, + ("float32", "float64"): libalgos.take_2d_axis1_float32_float64, + ("float64", "float64"): libalgos.take_2d_axis1_float64_float64, + ("object", "object"): libalgos.take_2d_axis1_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_axis1_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + +_take_2d_multi_dict = { + ("int8", "int8"): libalgos.take_2d_multi_int8_int8, + ("int8", "int32"): libalgos.take_2d_multi_int8_int32, + ("int8", "int64"): libalgos.take_2d_multi_int8_int64, + ("int8", "float64"): libalgos.take_2d_multi_int8_float64, + ("int16", "int16"): 
libalgos.take_2d_multi_int16_int16, + ("int16", "int32"): libalgos.take_2d_multi_int16_int32, + ("int16", "int64"): libalgos.take_2d_multi_int16_int64, + ("int16", "float64"): libalgos.take_2d_multi_int16_float64, + ("int32", "int32"): libalgos.take_2d_multi_int32_int32, + ("int32", "int64"): libalgos.take_2d_multi_int32_int64, + ("int32", "float64"): libalgos.take_2d_multi_int32_float64, + ("int64", "int64"): libalgos.take_2d_multi_int64_int64, + ("int64", "float64"): libalgos.take_2d_multi_int64_float64, + ("float32", "float32"): libalgos.take_2d_multi_float32_float32, + ("float32", "float64"): libalgos.take_2d_multi_float32_float64, + ("float64", "float64"): libalgos.take_2d_multi_float64_float64, + ("object", "object"): libalgos.take_2d_multi_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_multi_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + + +def _take_nd_object( + arr: np.ndarray, + indexer: npt.NDArray[np.intp], + out: np.ndarray, + axis: AxisInt, + fill_value, + mask_info, +) -> None: + if mask_info is not None: + mask, needs_masking = mask_info + else: + mask = indexer == -1 + needs_masking = mask.any() + if arr.dtype != out.dtype: + arr = arr.astype(out.dtype) + if arr.shape[axis] > 0: + arr.take(indexer, axis=axis, out=out) + if needs_masking: + outindexer = [slice(None)] * arr.ndim + outindexer[axis] = mask + out[tuple(outindexer)] = fill_value + + +def _take_2d_multi_object( + arr: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value, + mask_info, +) -> None: + # this is not ideal, performance-wise, but it's better than raising + # an exception (best to optimize in Cython to avoid getting here) + row_idx, col_idx = indexer # both np.intp + if mask_info is not None: + (row_mask, col_mask), (row_needs, col_needs) = mask_info + else: + row_mask = row_idx == -1 + col_mask = col_idx == -1 + row_needs = row_mask.any() + col_needs = col_mask.any() + if fill_value is not None: + if row_needs: + out[row_mask, :] = fill_value + if col_needs: + out[:, col_mask] = fill_value + for i, u_ in enumerate(row_idx): + if u_ != -1: + for j, v in enumerate(col_idx): + if v != -1: + out[i, j] = arr[u_, v] + + +def _take_preprocess_indexer_and_fill_value( + arr: np.ndarray, + indexer: npt.NDArray[np.intp], + fill_value, + allow_fill: bool, + mask: npt.NDArray[np.bool_] | None = None, +): + mask_info: tuple[np.ndarray | None, bool] | None = None + + if not allow_fill: + dtype, fill_value = arr.dtype, arr.dtype.type() + mask_info = None, False + else: + # check for promotion based on types only (do this first because + # it's faster than computing a mask) + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if dtype != arr.dtype: + # check if promotion is actually required based on indexer + if mask is not None: + needs_masking = True + else: + mask = indexer == -1 + needs_masking = bool(mask.any()) + mask_info = mask, needs_masking + if not needs_masking: + # if not, then depromote, set fill_value to dummy + # (it won't be used but we don't want the cython code + # to crash when trying to cast it to dtype) + dtype, fill_value = arr.dtype, arr.dtype.type() + + 
return dtype, fill_value, mask_info diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..ec67244949e3db92cc811b19cdfcd5d1dd2b4de8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py @@ -0,0 +1,50 @@ +""" +transforms.py is for shape-preserving functions. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + Scalar, + ) + + +def shift( + values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar +) -> np.ndarray: + new_values = values + + if periods == 0 or values.size == 0: + return new_values.copy() + + # make sure array sent to np.roll is c_contiguous + f_ordered = values.flags.f_contiguous + if f_ordered: + new_values = new_values.T + axis = new_values.ndim - axis - 1 + + if new_values.size: + new_values = np.roll( + new_values, + np.intp(periods), + axis=axis, + ) + + axis_indexer = [slice(None)] * values.ndim + if periods > 0: + axis_indexer[axis] = slice(None, periods) + else: + axis_indexer[axis] = slice(periods, None) + new_values[tuple(axis_indexer)] = fill_value + + # restore original order + if f_ordered: + new_values = new_values.T + + return new_values diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/arraylike.py b/videollama2/lib/python3.10/site-packages/pandas/core/arraylike.py new file mode 100644 index 0000000000000000000000000000000000000000..fd585b3e1946880ca79f97b0b6d4a2990bdd32d1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/arraylike.py @@ -0,0 +1,530 @@ +""" +Methods that can be shared by many array-like classes or subclasses: + Series + Index + ExtensionArray +""" +from __future__ import annotations + +import operator +from typing import Any + +import numpy as np + +from pandas._libs import lib +from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op + +from pandas.core.dtypes.generic import ABCNDFrame + +from pandas.core import roperator +from pandas.core.construction import extract_array +from pandas.core.ops.common import unpack_zerodim_and_defer + +REDUCTION_ALIASES = { + "maximum": "max", + "minimum": "min", + "add": "sum", + "multiply": "prod", +} + + +class OpsMixin: + # ------------------------------------------------------------- + # Comparisons + + def _cmp_method(self, other, op): + return NotImplemented + + @unpack_zerodim_and_defer("__eq__") + def __eq__(self, other): + return self._cmp_method(other, operator.eq) + + @unpack_zerodim_and_defer("__ne__") + def __ne__(self, other): + return self._cmp_method(other, operator.ne) + + @unpack_zerodim_and_defer("__lt__") + def __lt__(self, other): + return self._cmp_method(other, operator.lt) + + @unpack_zerodim_and_defer("__le__") + def __le__(self, other): + return self._cmp_method(other, operator.le) + + @unpack_zerodim_and_defer("__gt__") + def __gt__(self, other): + return self._cmp_method(other, operator.gt) + + @unpack_zerodim_and_defer("__ge__") + def __ge__(self, other): + return self._cmp_method(other, operator.ge) + + # ------------------------------------------------------------- + # Logical Methods + + def _logical_method(self, other, op): + return NotImplemented + + @unpack_zerodim_and_defer("__and__") + def __and__(self, other): + return self._logical_method(other, operator.and_) + + 
@unpack_zerodim_and_defer("__rand__") + def __rand__(self, other): + return self._logical_method(other, roperator.rand_) + + @unpack_zerodim_and_defer("__or__") + def __or__(self, other): + return self._logical_method(other, operator.or_) + + @unpack_zerodim_and_defer("__ror__") + def __ror__(self, other): + return self._logical_method(other, roperator.ror_) + + @unpack_zerodim_and_defer("__xor__") + def __xor__(self, other): + return self._logical_method(other, operator.xor) + + @unpack_zerodim_and_defer("__rxor__") + def __rxor__(self, other): + return self._logical_method(other, roperator.rxor) + + # ------------------------------------------------------------- + # Arithmetic Methods + + def _arith_method(self, other, op): + return NotImplemented + + @unpack_zerodim_and_defer("__add__") + def __add__(self, other): + """ + Get Addition of DataFrame and other, column-wise. + + Equivalent to ``DataFrame.add(other)``. + + Parameters + ---------- + other : scalar, sequence, Series, dict or DataFrame + Object to be added to the DataFrame. + + Returns + ------- + DataFrame + The result of adding ``other`` to DataFrame. + + See Also + -------- + DataFrame.add : Add a DataFrame and another object, with option for index- + or column-oriented addition. + + Examples + -------- + >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]}, + ... index=['elk', 'moose']) + >>> df + height weight + elk 1.5 500 + moose 2.6 800 + + Adding a scalar affects all rows and columns. + + >>> df[['height', 'weight']] + 1.5 + height weight + elk 3.0 501.5 + moose 4.1 801.5 + + Each element of a list is added to a column of the DataFrame, in order. + + >>> df[['height', 'weight']] + [0.5, 1.5] + height weight + elk 2.0 501.5 + moose 3.1 801.5 + + Keys of a dictionary are aligned to the DataFrame, based on column names; + each value in the dictionary is added to the corresponding column. + + >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5} + height weight + elk 2.0 501.5 + moose 3.1 801.5 + + When `other` is a :class:`Series`, the index of `other` is aligned with the + columns of the DataFrame. + + >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height']) + >>> df[['height', 'weight']] + s1 + height weight + elk 3.0 500.5 + moose 4.1 800.5 + + Even when the index of `other` is the same as the index of the DataFrame, + the :class:`Series` will not be reoriented. If index-wise alignment is desired, + :meth:`DataFrame.add` should be used with `axis='index'`. + + >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose']) + >>> df[['height', 'weight']] + s2 + elk height moose weight + elk NaN NaN NaN NaN + moose NaN NaN NaN NaN + + >>> df[['height', 'weight']].add(s2, axis='index') + height weight + elk 2.0 500.5 + moose 4.1 801.5 + + When `other` is a :class:`DataFrame`, both columns names and the + index are aligned. + + >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]}, + ... 
index=['elk', 'moose', 'deer']) + >>> df[['height', 'weight']] + other + height weight + deer NaN NaN + elk 1.7 NaN + moose 3.0 NaN + """ + return self._arith_method(other, operator.add) + + @unpack_zerodim_and_defer("__radd__") + def __radd__(self, other): + return self._arith_method(other, roperator.radd) + + @unpack_zerodim_and_defer("__sub__") + def __sub__(self, other): + return self._arith_method(other, operator.sub) + + @unpack_zerodim_and_defer("__rsub__") + def __rsub__(self, other): + return self._arith_method(other, roperator.rsub) + + @unpack_zerodim_and_defer("__mul__") + def __mul__(self, other): + return self._arith_method(other, operator.mul) + + @unpack_zerodim_and_defer("__rmul__") + def __rmul__(self, other): + return self._arith_method(other, roperator.rmul) + + @unpack_zerodim_and_defer("__truediv__") + def __truediv__(self, other): + return self._arith_method(other, operator.truediv) + + @unpack_zerodim_and_defer("__rtruediv__") + def __rtruediv__(self, other): + return self._arith_method(other, roperator.rtruediv) + + @unpack_zerodim_and_defer("__floordiv__") + def __floordiv__(self, other): + return self._arith_method(other, operator.floordiv) + + @unpack_zerodim_and_defer("__rfloordiv") + def __rfloordiv__(self, other): + return self._arith_method(other, roperator.rfloordiv) + + @unpack_zerodim_and_defer("__mod__") + def __mod__(self, other): + return self._arith_method(other, operator.mod) + + @unpack_zerodim_and_defer("__rmod__") + def __rmod__(self, other): + return self._arith_method(other, roperator.rmod) + + @unpack_zerodim_and_defer("__divmod__") + def __divmod__(self, other): + return self._arith_method(other, divmod) + + @unpack_zerodim_and_defer("__rdivmod__") + def __rdivmod__(self, other): + return self._arith_method(other, roperator.rdivmod) + + @unpack_zerodim_and_defer("__pow__") + def __pow__(self, other): + return self._arith_method(other, operator.pow) + + @unpack_zerodim_and_defer("__rpow__") + def __rpow__(self, other): + return self._arith_method(other, roperator.rpow) + + +# ----------------------------------------------------------------------------- +# Helpers to implement __array_ufunc__ + + +def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any): + """ + Compatibility with numpy ufuncs. + + See also + -------- + numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__ + """ + from pandas.core.frame import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + from pandas.core.internals import ( + ArrayManager, + BlockManager, + ) + + cls = type(self) + + kwargs = _standardize_out_kwarg(**kwargs) + + # for binary ops, use our custom dunder methods + result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) + if result is not NotImplemented: + return result + + # Determine if we should defer. + no_defer = ( + np.ndarray.__array_ufunc__, + cls.__array_ufunc__, + ) + + for item in inputs: + higher_priority = ( + hasattr(item, "__array_priority__") + and item.__array_priority__ > self.__array_priority__ + ) + has_array_ufunc = ( + hasattr(item, "__array_ufunc__") + and type(item).__array_ufunc__ not in no_defer + and not isinstance(item, self._HANDLED_TYPES) + ) + if higher_priority or has_array_ufunc: + return NotImplemented + + # align all the inputs. + types = tuple(type(x) for x in inputs) + alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)] + + if len(alignable) > 1: + # This triggers alignment. 
+ # At the moment, there aren't any ufuncs with more than two inputs + # so this ends up just being x1.index | x2.index, but we write + # it to handle *args. + set_types = set(types) + if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types): + # We currently don't handle ufunc(DataFrame, Series) + # well. Previously this raised an internal ValueError. We might + # support it someday, so raise a NotImplementedError. + raise NotImplementedError( + f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs." + ) + axes = self.axes + for obj in alignable[1:]: + # this relies on the fact that we aren't handling mixed + # series / frame ufuncs. + for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)): + axes[i] = ax1.union(ax2) + + reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes)) + inputs = tuple( + x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x + for x, t in zip(inputs, types) + ) + else: + reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes)) + + if self.ndim == 1: + names = [getattr(x, "name") for x in inputs if hasattr(x, "name")] + name = names[0] if len(set(names)) == 1 else None + reconstruct_kwargs = {"name": name} + else: + reconstruct_kwargs = {} + + def reconstruct(result): + if ufunc.nout > 1: + # np.modf, np.frexp, np.divmod + return tuple(_reconstruct(x) for x in result) + + return _reconstruct(result) + + def _reconstruct(result): + if lib.is_scalar(result): + return result + + if result.ndim != self.ndim: + if method == "outer": + raise NotImplementedError + return result + if isinstance(result, (BlockManager, ArrayManager)): + # we went through BlockManager.apply e.g. np.sqrt + result = self._constructor_from_mgr(result, axes=result.axes) + else: + # we converted an array, lost our axes + result = self._constructor( + result, **reconstruct_axes, **reconstruct_kwargs, copy=False + ) + # TODO: When we support multiple values in __finalize__, this + # should pass alignable to `__finalize__` instead of self. + # Then `np.add(a, b)` would consider attrs from both a and b + # when a and b are NDFrames. + if len(alignable) == 1: + result = result.__finalize__(self) + return result + + if "out" in kwargs: + # e.g. test_multiindex_get_loc + result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) + return reconstruct(result) + + if method == "reduce": + # e.g. test.series.test_ufunc.test_reduce + result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) + if result is not NotImplemented: + return result + + # We still get here with kwargs `axis` for e.g. np.maximum.accumulate + # and `dtype` and `keepdims` for np.ptp + + if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1): + # Just give up on preserving types in the complex case. + # In theory we could preserve the types here too: + # * nout>1 is doable if BlockManager.apply took nout and + # returned a Tuple[BlockManager]. + # * len(inputs) > 1 is doable when we know that we have + # aligned blocks / dtypes. + + # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add + inputs = tuple(np.asarray(x) for x in inputs) + # Note: we can't use default_array_ufunc here bc reindexing means + # that `self` may not be among `inputs` + result = getattr(ufunc, method)(*inputs, **kwargs) + elif self.ndim == 1: + # ufunc(series, ...) + inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs) + result = getattr(ufunc, method)(*inputs, **kwargs) + else: + # ufunc(dataframe) + if method == "__call__" and not kwargs: + # for np.<ufunc>(..)
calls + # kwargs cannot necessarily be handled block-by-block, so only + # take this path if there are no kwargs + mgr = inputs[0]._mgr + result = mgr.apply(getattr(ufunc, method)) + else: + # otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..)) + # Those can have an axis keyword and thus can't be called block-by-block + result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs) + # e.g. np.negative (only one reached), with "where" and "out" in kwargs + + result = reconstruct(result) + return result + + +def _standardize_out_kwarg(**kwargs) -> dict: + """ + If kwargs contain "out1" and "out2", replace that with a tuple "out" + + np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or + `out1=out1, out2=out2` + """ + if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs: + out1 = kwargs.pop("out1") + out2 = kwargs.pop("out2") + out = (out1, out2) + kwargs["out"] = out + return kwargs + + +def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + """ + If we have an `out` keyword, then call the ufunc without `out` and then + set the result into the given `out`. + """ + + # Note: we assume _standardize_out_kwarg has already been called. + out = kwargs.pop("out") + where = kwargs.pop("where", None) + + result = getattr(ufunc, method)(*inputs, **kwargs) + + if result is NotImplemented: + return NotImplemented + + if isinstance(result, tuple): + # i.e. np.divmod, np.modf, np.frexp + if not isinstance(out, tuple) or len(out) != len(result): + raise NotImplementedError + + for arr, res in zip(out, result): + _assign_where(arr, res, where) + + return out + + if isinstance(out, tuple): + if len(out) == 1: + out = out[0] + else: + raise NotImplementedError + + _assign_where(out, result, where) + return out + + +def _assign_where(out, result, where) -> None: + """ + Set a ufunc result into 'out', masking with a 'where' argument if necessary. + """ + if where is None: + # no 'where' arg passed to ufunc + out[:] = result + else: + np.putmask(out, where, result) + + +def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + """ + Fallback to the behavior we would get if we did not define __array_ufunc__. + + Notes + ----- + We are assuming that `self` is among `inputs`. + """ + if not any(x is self for x in inputs): + raise NotImplementedError + + new_inputs = [x if x is not self else np.asarray(x) for x in inputs] + + return getattr(ufunc, method)(*new_inputs, **kwargs) + + +def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + """ + Dispatch ufunc reductions to self's reduction methods. + """ + assert method == "reduce" + + if len(inputs) != 1 or inputs[0] is not self: + return NotImplemented + + if ufunc.__name__ not in REDUCTION_ALIASES: + return NotImplemented + + method_name = REDUCTION_ALIASES[ufunc.__name__] + + # NB: we are assuming that min/max represent minimum/maximum methods, + # which would not be accurate for e.g. Timestamp.min + if not hasattr(self, method_name): + return NotImplemented + + if self.ndim > 1: + if isinstance(self, ABCNDFrame): + # TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA + kwargs["numeric_only"] = False + + if "axis" not in kwargs: + # For DataFrame reductions we don't want the default axis=0 + # Note: np.min is not a ufunc, but uses array_function_dispatch, + # so calls DataFrame.min (without ever getting here) with the np.min + # default of axis=None, which DataFrame.min catches and changes to axis=0.
+ # np.minimum.reduce(df) gets here bc axis is not in kwargs, + # so we set axis=0 to match the behavior of np.minimum.reduce(df.values) + kwargs["axis"] = 0 + + # By default, numpy's reductions do not skip NaNs, so we have to + # pass skipna=False + return getattr(self, method_name)(skipna=False, **kwargs) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/base.py b/videollama2/lib/python3.10/site-packages/pandas/core/base.py new file mode 100644 index 0000000000000000000000000000000000000000..e98f1157572bb91d72708e296702d9d4cd34eee8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/base.py @@ -0,0 +1,1391 @@ +""" +Base and utility classes for pandas objects. +""" + +from __future__ import annotations + +import textwrap +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Literal, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import lib +from pandas._typing import ( + AxisInt, + DtypeObj, + IndexLabel, + NDFrameT, + Self, + Shape, + npt, +) +from pandas.compat import PYPY +from pandas.compat.numpy import function as nv +from pandas.errors import AbstractMethodError +from pandas.util._decorators import ( + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import can_hold_element +from pandas.core.dtypes.common import ( + is_object_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + remove_na_arraylike, +) + +from pandas.core import ( + algorithms, + nanops, + ops, +) +from pandas.core.accessor import DirNamesMixin +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays import ExtensionArray +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + + from pandas._typing import ( + DropKeep, + NumpySorter, + NumpyValueArrayLike, + ScalarLike_co, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +_shared_docs: dict[str, str] = {} +_indexops_doc_kwargs = { + "klass": "IndexOpsMixin", + "inplace": "", + "unique": "IndexOpsMixin", + "duplicated": "IndexOpsMixin", +} + + +class PandasObject(DirNamesMixin): + """ + Baseclass for various pandas objects. + """ + + # results from calls to methods decorated with cache_readonly get added to _cache + _cache: dict[str, Any] + + @property + def _constructor(self): + """ + Class constructor (for this class it's just `__class__`). + """ + return type(self) + + def __repr__(self) -> str: + """ + Return a string representation for a particular object. + """ + # Should be overwritten by base classes + return object.__repr__(self) + + def _reset_cache(self, key: str | None = None) -> None: + """ + Reset cached properties. If ``key`` is passed, only clears that key.
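+ + Passing a ``key`` that was never cached is a no-op: the lookup below + uses ``dict.pop`` with a default of ``None``.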
+ """ + if not hasattr(self, "_cache"): + return + if key is None: + self._cache.clear() + else: + self._cache.pop(key, None) + + def __sizeof__(self) -> int: + """ + Generates the total memory usage for an object that returns + either a value or Series of values + """ + memory_usage = getattr(self, "memory_usage", None) + if memory_usage: + mem = memory_usage(deep=True) # pylint: disable=not-callable + return int(mem if is_scalar(mem) else mem.sum()) + + # no memory_usage attribute, so fall back to object's 'sizeof' + return super().__sizeof__() + + +class NoNewAttributesMixin: + """ + Mixin which prevents adding new attributes. + + Prevents additional attributes via xxx.attribute = "something" after a + call to `self.__freeze()`. Mainly used to prevent the user from using + wrong attributes on an accessor (`Series.cat/.str/.dt`). + + If you really want to add a new attribute at a later time, you need to use + `object.__setattr__(self, key, value)`. + """ + + def _freeze(self) -> None: + """ + Prevents setting additional attributes. + """ + object.__setattr__(self, "__frozen", True) + + # prevent adding any attribute via s.xxx.new_attribute = ... + def __setattr__(self, key: str, value) -> None: + # _cache is used by a decorator + # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key) + # because + # 1.) getattr is false for attributes that raise errors + # 2.) cls.__dict__ doesn't traverse into base classes + if getattr(self, "__frozen", False) and not ( + key == "_cache" + or key in type(self).__dict__ + or getattr(self, key, None) is not None + ): + raise AttributeError(f"You cannot add any new attribute '{key}'") + object.__setattr__(self, key, value) + + +class SelectionMixin(Generic[NDFrameT]): + """ + mixin implementing the selection & aggregation interface on a group-like + object sub-classes need to define: obj, exclusions + """ + + obj: NDFrameT + _selection: IndexLabel | None = None + exclusions: frozenset[Hashable] + _internal_names = ["_cache", "__setstate__"] + _internal_names_set = set(_internal_names) + + @final + @property + def _selection_list(self): + if not isinstance( + self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray) + ): + return [self._selection] + return self._selection + + @cache_readonly + def _selected_obj(self): + if self._selection is None or isinstance(self.obj, ABCSeries): + return self.obj + else: + return self.obj[self._selection] + + @final + @cache_readonly + def ndim(self) -> int: + return self._selected_obj.ndim + + @final + @cache_readonly + def _obj_with_exclusions(self): + if isinstance(self.obj, ABCSeries): + return self.obj + + if self._selection is not None: + return self.obj._getitem_nocopy(self._selection_list) + + if len(self.exclusions) > 0: + # equivalent to `self.obj.drop(self.exclusions, axis=1) + # but this avoids consolidating and making a copy + # TODO: following GH#45287 can we now use .drop directly without + # making a copy? 
+ return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True) + else: + return self.obj + + def __getitem__(self, key): + if self._selection is not None: + raise IndexError(f"Column(s) {self._selection} already selected") + + if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)): + if len(self.obj.columns.intersection(key)) != len(set(key)): + bad_keys = list(set(key).difference(self.obj.columns)) + raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}") + return self._gotitem(list(key), ndim=2) + + else: + if key not in self.obj: + raise KeyError(f"Column not found: {key}") + ndim = self.obj[key].ndim + return self._gotitem(key, ndim=ndim) + + def _gotitem(self, key, ndim: int, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : str / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + raise AbstractMethodError(self) + + @final + def _infer_selection(self, key, subset: Series | DataFrame): + """ + Infer the `selection` to pass to our constructor in _gotitem. + """ + # Shared by Rolling and Resample + selection = None + if subset.ndim == 2 and ( + (lib.is_scalar(key) and key in subset) or lib.is_list_like(key) + ): + selection = key + elif subset.ndim == 1 and lib.is_scalar(key) and key == subset.name: + selection = key + return selection + + def aggregate(self, func, *args, **kwargs): + raise AbstractMethodError(self) + + agg = aggregate + + +class IndexOpsMixin(OpsMixin): + """ + Common ops mixin to support a unified interface / docs for Series / Index + """ + + # ndarray compatibility + __array_priority__ = 1000 + _hidden_attrs: frozenset[str] = frozenset( + ["tolist"] # tolist is not deprecated, just suppressed in the __dir__ + ) + + @property + def dtype(self) -> DtypeObj: + # must be defined here as a property for mypy + raise AbstractMethodError(self) + + @property + def _values(self) -> ExtensionArray | np.ndarray: + # must be defined here as a property for mypy + raise AbstractMethodError(self) + + @final + def transpose(self, *args, **kwargs) -> Self: + """ + Return the transpose, which is by definition self. + + Returns + ------- + %(klass)s + """ + nv.validate_transpose(args, kwargs) + return self + + T = property( + transpose, + doc=""" + Return the transpose, which is by definition self. + + Examples + -------- + For Series: + + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.T + 0 Ant + 1 Bear + 2 Cow + dtype: object + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx.T + Index([1, 2, 3], dtype='int64') + """, + ) + + @property + def shape(self) -> Shape: + """ + Return a tuple of the shape of the underlying data. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.shape + (3,) + """ + return self._values.shape + + def __len__(self) -> int: + # We need this defined here for mypy + raise AbstractMethodError(self) + + @property + def ndim(self) -> Literal[1]: + """ + Number of dimensions of the underlying data, by definition 1. + + Examples + -------- + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.ndim + 1 + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.ndim + 1 + """ + return 1 + + @final + def item(self): + """ + Return the first element of the underlying data as a Python scalar. + + Returns + ------- + scalar + The first element of Series or Index. 
+ + Raises + ------ + ValueError + If the data is not of length 1. + + Examples + -------- + >>> s = pd.Series([1]) + >>> s.item() + 1 + + For an index: + + >>> s = pd.Series([1], index=['a']) + >>> s.index.item() + 'a' + """ + if len(self) == 1: + return next(iter(self)) + raise ValueError("can only convert an array of size 1 to a Python scalar") + + @property + def nbytes(self) -> int: + """ + Return the number of bytes in the underlying data. + + Examples + -------- + For Series: + + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.nbytes + 24 + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.nbytes + 24 + """ + return self._values.nbytes + + @property + def size(self) -> int: + """ + Return the number of elements in the underlying data. + + Examples + -------- + For Series: + + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.size + 3 + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.size + 3 + """ + return len(self._values) + + @property + def array(self) -> ExtensionArray: + """ + The ExtensionArray of the data backing this Series or Index. + + Returns + ------- + ExtensionArray + An ExtensionArray of the values stored within. For extension + types, this is the actual array. For NumPy native types, this + is a thin (no copy) wrapper around :class:`numpy.ndarray`. + + ``.array`` differs from ``.values``, which may require converting + the data to a different form. + + See Also + -------- + Index.to_numpy : Similar method that always returns a NumPy array. + Series.to_numpy : Similar method that always returns a NumPy array. + + Notes + ----- + This table lays out the different array types for each extension + dtype within pandas. + + ================== ============================= + dtype array type + ================== ============================= + category Categorical + period PeriodArray + interval IntervalArray + IntegerNA IntegerArray + string StringArray + boolean BooleanArray + datetime64[ns, tz] DatetimeArray + ================== ============================= + + For any 3rd-party extension types, the array type will be an + ExtensionArray. + + For all remaining dtypes ``.array`` will be a + :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray + stored within. If you absolutely need a NumPy array (possibly with + copying / coercing data), then use :meth:`Series.to_numpy` instead. + + Examples + -------- + For regular NumPy types like int, and float, a NumpyExtensionArray + is returned. + + >>> pd.Series([1, 2, 3]).array + <NumpyExtensionArray> + [1, 2, 3] + Length: 3, dtype: int64 + + For extension types, like Categorical, the actual ExtensionArray + is returned + + >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) + >>> ser.array + ['a', 'b', 'a'] + Categories (2, object): ['a', 'b'] + """ + raise AbstractMethodError(self) + + @final + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + **kwargs, + ) -> np.ndarray: + """ + A NumPy ndarray representing the values in this Series or Index. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to pass to :meth:`numpy.asarray`. + copy : bool, default False + Whether to ensure that the returned value is not a view on + another array. Note that ``copy=False`` does not *ensure* that + ``to_numpy()`` is no-copy.
Rather, ``copy=True`` ensures that + a copy is made, even if not strictly necessary. + na_value : Any, optional + The value to use for missing values. The default value depends + on `dtype` and the type of the array. + **kwargs + Additional keywords passed through to the ``to_numpy`` method + of the underlying array (for extension arrays). + + Returns + ------- + numpy.ndarray + + See Also + -------- + Series.array : Get the actual data stored within. + Index.array : Get the actual data stored within. + DataFrame.to_numpy : Similar method for DataFrame. + + Notes + ----- + The returned array will be the same up to equality (values equal + in `self` will be equal in the returned array; likewise for values + that are not equal). When `self` contains an ExtensionArray, the + dtype may be different. For example, for a category-dtype Series, + ``to_numpy()`` will return a NumPy array and the categorical dtype + will be lost. + + For NumPy dtypes, this will be a reference to the actual data stored + in this Series or Index (assuming ``copy=False``). Modifying the result + in place will modify the data stored in the Series or Index (not that + we recommend doing that). + + For extension types, ``to_numpy()`` *may* require copying data and + coercing the result to a NumPy type (possibly object), which may be + expensive. When you need a no-copy reference to the underlying data, + :attr:`Series.array` should be used instead. + + This table lays out the different dtypes and default return types of + ``to_numpy()`` for various dtypes within pandas. + + ================== ================================ + dtype array type + ================== ================================ + category[T] ndarray[T] (same dtype as input) + period ndarray[object] (Periods) + interval ndarray[object] (Intervals) + IntegerNA ndarray[object] + datetime64[ns] datetime64[ns] + datetime64[ns, tz] ndarray[object] (Timestamps) + ================== ================================ + + Examples + -------- + >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) + >>> ser.to_numpy() + array(['a', 'b', 'a'], dtype=object) + + Specify the `dtype` to control how datetime-aware data is represented. + Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp` + objects, each with the correct ``tz``. + + >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + >>> ser.to_numpy(dtype=object) + array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), + Timestamp('2000-01-02 00:00:00+0100', tz='CET')], + dtype=object) + + Or ``dtype='datetime64[ns]'`` to return an ndarray of native + datetime64 values. The values are converted to UTC and the timezone + info is dropped. + + >>> ser.to_numpy(dtype="datetime64[ns]") + ... # doctest: +ELLIPSIS + array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], + dtype='datetime64[ns]') + """ + if isinstance(self.dtype, ExtensionDtype): + return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs) + elif kwargs: + bad_keys = next(iter(kwargs.keys())) + raise TypeError( + f"to_numpy() got an unexpected keyword argument '{bad_keys}'" + ) + + fillna = ( + na_value is not lib.no_default + # no need to fillna with np.nan if we already have a float dtype + and not (na_value is np.nan and np.issubdtype(self.dtype, np.floating)) + ) + + values = self._values + if fillna: + if not can_hold_element(values, na_value): + # if we can't hold the na_value asarray either makes a copy or we + # error before modifying values.
The asarray later on thus won't make + # another copy + values = np.asarray(values, dtype=dtype) + else: + values = values.copy() + + values[np.asanyarray(isna(self))] = na_value + + result = np.asarray(values, dtype=dtype) + + if (copy and not fillna) or (not copy and using_copy_on_write()): + if np.shares_memory(self._values[:2], result[:2]): + # Take slices to improve performance of check + if using_copy_on_write() and not copy: + result = result.view() + result.flags.writeable = False + else: + result = result.copy() + + return result + + @final + @property + def empty(self) -> bool: + return not self.size + + @doc(op="max", oppose="min", value="largest") + def argmax( + self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs + ) -> int: + """ + Return int position of the {value} value in the Series. + + If the {op}imum is achieved in multiple locations, + the first row position is returned. + + Parameters + ---------- + axis : {{None}} + Unused. Parameter needed for compatibility with DataFrame. + skipna : bool, default True + Exclude NA/null values when showing the result. + *args, **kwargs + Additional arguments and keywords for compatibility with NumPy. + + Returns + ------- + int + Row position of the {op}imum value. + + See Also + -------- + Series.arg{op} : Return position of the {op}imum value. + Series.arg{oppose} : Return position of the {oppose}imum value. + numpy.ndarray.arg{op} : Equivalent method for numpy arrays. + Series.idxmax : Return index label of the maximum values. + Series.idxmin : Return index label of the minimum values. + + Examples + -------- + Consider dataset containing cereal calories + + >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0, + ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}}) + >>> s + Corn Flakes 100.0 + Almond Delight 110.0 + Cinnamon Toast Crunch 120.0 + Cocoa Puff 110.0 + dtype: float64 + + >>> s.argmax() + 2 + >>> s.argmin() + 0 + + The maximum cereal calories is the third element and + the minimum cereal calories is the first element, + since series is zero-indexed. + """ + delegate = self._values + nv.validate_minmax_axis(axis) + skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) + + if isinstance(delegate, ExtensionArray): + if not skipna and delegate.isna().any(): + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return -1 + else: + return delegate.argmax() + else: + result = nanops.nanargmax(delegate, skipna=skipna) + if result == -1: + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. 
" + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # error: Incompatible return value type (got "Union[int, ndarray]", expected + # "int") + return result # type: ignore[return-value] + + @doc(argmax, op="min", oppose="max", value="smallest") + def argmin( + self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs + ) -> int: + delegate = self._values + nv.validate_minmax_axis(axis) + skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) + + if isinstance(delegate, ExtensionArray): + if not skipna and delegate.isna().any(): + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return -1 + else: + return delegate.argmin() + else: + result = nanops.nanargmin(delegate, skipna=skipna) + if result == -1: + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # error: Incompatible return value type (got "Union[int, ndarray]", expected + # "int") + return result # type: ignore[return-value] + + def tolist(self): + """ + Return a list of the values. + + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + list + + See Also + -------- + numpy.ndarray.tolist : Return the array as an a.ndim-levels deep + nested list of Python scalars. + + Examples + -------- + For Series + + >>> s = pd.Series([1, 2, 3]) + >>> s.to_list() + [1, 2, 3] + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + + >>> idx.to_list() + [1, 2, 3] + """ + return self._values.tolist() + + to_list = tolist + + def __iter__(self) -> Iterator: + """ + Return an iterator of the values. + + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + iterator + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> for x in s: + ... print(x) + 1 + 2 + 3 + """ + # We are explicitly making element iterators. + if not isinstance(self._values, np.ndarray): + # Check type instead of dtype to catch DTA/TDA + return iter(self._values) + else: + return map(self._values.item, range(self._values.size)) + + @cache_readonly + def hasnans(self) -> bool: + """ + Return True if there are any NaNs. + + Enables various performance speedups. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([1, 2, 3, None]) + >>> s + 0 1.0 + 1 2.0 + 2 3.0 + 3 NaN + dtype: float64 + >>> s.hasnans + True + """ + # error: Item "bool" of "Union[bool, ndarray[Any, dtype[bool_]], NDFrame]" + # has no attribute "any" + return bool(isna(self).any()) # type: ignore[union-attr] + + @final + def _map_values(self, mapper, na_action=None, convert: bool = True): + """ + An internal function that maps values using the input + correspondence (which can be a dict, Series, or function). 
+ + Parameters + ---------- + mapper : function, dict, or Series + The input correspondence object + na_action : {None, 'ignore'} + If 'ignore', propagate NA values, without passing them to the + mapping function + convert : bool, default True + Try to find better dtype for elementwise function results. If + False, leave as dtype=object. Note that the dtype is always + preserved for some extension array dtypes, such as Categorical. + + Returns + ------- + Union[Index, MultiIndex], inferred + The output of the mapping function applied to the index. + If the function returns a tuple with more than one element + a MultiIndex will be returned. + """ + arr = self._values + + if isinstance(arr, ExtensionArray): + return arr.map(mapper, na_action=na_action) + + return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert) + + @final + def value_counts( + self, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + bins=None, + dropna: bool = True, + ) -> Series: + """ + Return a Series containing counts of unique values. + + The resulting object will be in descending order so that the + first element is the most frequently-occurring element. + Excludes NA values by default. + + Parameters + ---------- + normalize : bool, default False + If True then the object returned will contain the relative + frequencies of the unique values. + sort : bool, default True + Sort by frequencies when True. Preserve the order of the data when False. + ascending : bool, default False + Sort in ascending order. + bins : int, optional + Rather than count values, group them into half-open bins, + a convenience for ``pd.cut``, only works with numeric data. + dropna : bool, default True + Don't include counts of NaN. + + Returns + ------- + Series + + See Also + -------- + Series.count: Number of non-NA elements in a Series. + DataFrame.count: Number of non-NA elements in a DataFrame. + DataFrame.value_counts: Equivalent method on DataFrames. + + Examples + -------- + >>> index = pd.Index([3, 1, 2, 3, 4, np.nan]) + >>> index.value_counts() + 3.0 2 + 1.0 1 + 2.0 1 + 4.0 1 + Name: count, dtype: int64 + + With `normalize` set to `True`, returns the relative frequency by + dividing all values by the sum of values. + + >>> s = pd.Series([3, 1, 2, 3, 4, np.nan]) + >>> s.value_counts(normalize=True) + 3.0 0.4 + 1.0 0.2 + 2.0 0.2 + 4.0 0.2 + Name: proportion, dtype: float64 + + **bins** + + Bins can be useful for going from a continuous variable to a + categorical variable; instead of counting unique + apparitions of values, divide the index in the specified + number of half-open bins. + + >>> s.value_counts(bins=3) + (0.996, 2.0] 2 + (2.0, 3.0] 2 + (3.0, 4.0] 1 + Name: count, dtype: int64 + + **dropna** + + With `dropna` set to `False` we can also see NaN index values. + + >>> s.value_counts(dropna=False) + 3.0 2 + 1.0 1 + 2.0 1 + 4.0 1 + NaN 1 + Name: count, dtype: int64 + """ + return algorithms.value_counts_internal( + self, + sort=sort, + ascending=ascending, + normalize=normalize, + bins=bins, + dropna=dropna, + ) + + def unique(self): + values = self._values + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + result = values.unique() + else: + result = algorithms.unique1d(values) + return result + + @final + def nunique(self, dropna: bool = True) -> int: + """ + Return number of unique elements in the object. + + Excludes NA values by default. + + Parameters + ---------- + dropna : bool, default True + Don't include NaN in the count. 
+ + Returns + ------- + int + + See Also + -------- + DataFrame.nunique: Method nunique for DataFrame. + Series.count: Count non-NA/null observations in the Series. + + Examples + -------- + >>> s = pd.Series([1, 3, 5, 7, 7]) + >>> s + 0 1 + 1 3 + 2 5 + 3 7 + 4 7 + dtype: int64 + + >>> s.nunique() + 4 + """ + uniqs = self.unique() + if dropna: + uniqs = remove_na_arraylike(uniqs) + return len(uniqs) + + @property + def is_unique(self) -> bool: + """ + Return boolean if values in the object are unique. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.is_unique + True + + >>> s = pd.Series([1, 2, 3, 1]) + >>> s.is_unique + False + """ + return self.nunique(dropna=False) == len(self) + + @property + def is_monotonic_increasing(self) -> bool: + """ + Return boolean if values in the object are monotonically increasing. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([1, 2, 2]) + >>> s.is_monotonic_increasing + True + + >>> s = pd.Series([3, 2, 1]) + >>> s.is_monotonic_increasing + False + """ + from pandas import Index + + return Index(self).is_monotonic_increasing + + @property + def is_monotonic_decreasing(self) -> bool: + """ + Return boolean if values in the object are monotonically decreasing. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([3, 2, 2, 1]) + >>> s.is_monotonic_decreasing + True + + >>> s = pd.Series([1, 2, 3]) + >>> s.is_monotonic_decreasing + False + """ + from pandas import Index + + return Index(self).is_monotonic_decreasing + + @final + def _memory_usage(self, deep: bool = False) -> int: + """ + Memory usage of the values. + + Parameters + ---------- + deep : bool, default False + Introspect the data deeply, interrogate + `object` dtypes for system-level memory consumption. + + Returns + ------- + bytes used + + See Also + -------- + numpy.ndarray.nbytes : Total bytes consumed by the elements of the + array. + + Notes + ----- + Memory usage does not include memory consumed by elements that + are not components of the array if deep=False or if used on PyPy + + Examples + -------- + >>> idx = pd.Index([1, 2, 3]) + >>> idx.memory_usage() + 24 + """ + if hasattr(self.array, "memory_usage"): + return self.array.memory_usage( # pyright: ignore[reportGeneralTypeIssues] + deep=deep, + ) + + v = self.array.nbytes + if deep and is_object_dtype(self.dtype) and not PYPY: + values = cast(np.ndarray, self._values) + v += lib.memory_usage_of_objects(values) + return v + + @doc( + algorithms.factorize, + values="", + order="", + size_hint="", + sort=textwrap.dedent( + """\ + sort : bool, default False + Sort `uniques` and shuffle `codes` to maintain the + relationship. + """ + ), + ) + def factorize( + self, + sort: bool = False, + use_na_sentinel: bool = True, + ) -> tuple[npt.NDArray[np.intp], Index]: + codes, uniques = algorithms.factorize( + self._values, sort=sort, use_na_sentinel=use_na_sentinel + ) + if uniques.dtype == np.float16: + uniques = uniques.astype(np.float32) + + if isinstance(self, ABCIndex): + # preserve e.g. MultiIndex + uniques = self._constructor(uniques) + else: + from pandas import Index + + uniques = Index(uniques) + return codes, uniques + + _shared_docs[ + "searchsorted" + ] = """ + Find indices where elements should be inserted to maintain order. + + Find the indices into a sorted {klass} `self` such that, if the + corresponding elements in `value` were inserted before the indices, + the order of `self` would be preserved. + + .. 
note:: + + The {klass} *must* be monotonically sorted, otherwise + wrong locations will likely be returned. Pandas does *not* + check this for you. + + Parameters + ---------- + value : array-like or scalar + Values to insert into `self`. + side : {{'left', 'right'}}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `self`). + sorter : 1-D array-like, optional + Optional array of integer indices that sort `self` into ascending + order. They are typically the result of ``np.argsort``. + + Returns + ------- + int or array of int + A scalar or array of insertion points with the + same shape as `value`. + + See Also + -------- + sort_values : Sort by the values along either axis. + numpy.searchsorted : Similar method from NumPy. + + Notes + ----- + Binary search is used to find the required insertion points. + + Examples + -------- + >>> ser = pd.Series([1, 2, 3]) + >>> ser + 0 1 + 1 2 + 2 3 + dtype: int64 + + >>> ser.searchsorted(4) + 3 + + >>> ser.searchsorted([0, 4]) + array([0, 3]) + + >>> ser.searchsorted([1, 3], side='left') + array([0, 2]) + + >>> ser.searchsorted([1, 3], side='right') + array([1, 3]) + + >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'])) + >>> ser + 0 2000-03-11 + 1 2000-03-12 + 2 2000-03-13 + dtype: datetime64[ns] + + >>> ser.searchsorted('3/14/2000') + 3 + + >>> ser = pd.Categorical( + ... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True + ... ) + >>> ser + ['apple', 'bread', 'bread', 'cheese', 'milk'] + Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk'] + + >>> ser.searchsorted('bread') + 1 + + >>> ser.searchsorted(['bread'], side='right') + array([3]) + + If the values are not monotonically sorted, wrong locations + may be returned: + + >>> ser = pd.Series([2, 1, 3]) + >>> ser + 0 2 + 1 1 + 2 3 + dtype: int64 + + >>> ser.searchsorted(1) # doctest: +SKIP + 0 # wrong result, correct would be 1 + """ + + # This overload is needed so that the call to searchsorted in + # pandas.core.resample.TimeGrouper._get_period_bins picks the correct result + + # error: Overloaded function signatures 1 and 2 overlap with incompatible + # return types + @overload + def searchsorted( # type: ignore[overload-overlap] + self, + value: ScalarLike_co, + side: Literal["left", "right"] = ..., + sorter: NumpySorter = ..., + ) -> np.intp: + ... + + @overload + def searchsorted( + self, + value: npt.ArrayLike | ExtensionArray, + side: Literal["left", "right"] = ..., + sorter: NumpySorter = ..., + ) -> npt.NDArray[np.intp]: + ... 
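+ + # For illustration (hypothetical Series): pd.Series([1, 2, 3]).searchsorted(4) + # returns np.intp(3), while .searchsorted([0, 4]) returns array([0, 3]); the + # overloads above exist only to expose these two return types to type checkers.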
+ + @doc(_shared_docs["searchsorted"], klass="Index") + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + if isinstance(value, ABCDataFrame): + msg = ( + "Value must be 1-D array-like or scalar, " + f"{type(value).__name__} is not supported" + ) + raise ValueError(msg) + + values = self._values + if not isinstance(values, np.ndarray): + # Going through EA.searchsorted directly improves performance GH#38083 + return values.searchsorted(value, side=side, sorter=sorter) + + return algorithms.searchsorted( + values, + value, + side=side, + sorter=sorter, + ) + + def drop_duplicates(self, *, keep: DropKeep = "first"): + duplicated = self._duplicated(keep=keep) + # error: Value of type "IndexOpsMixin" is not indexable + return self[~duplicated] # type: ignore[index] + + @final + def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: + arr = self._values + if isinstance(arr, ExtensionArray): + return arr.duplicated(keep=keep) + return algorithms.duplicated(arr, keep=keep) + + def _arith_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + + lvalues = self._values + rvalues = extract_array(other, extract_numpy=True, extract_range=True) + rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape) + rvalues = ensure_wrapped_if_datetimelike(rvalues) + if isinstance(rvalues, range): + rvalues = np.arange(rvalues.start, rvalues.stop, rvalues.step) + + with np.errstate(all="ignore"): + result = ops.arithmetic_op(lvalues, rvalues, op) + + return self._construct_result(result, name=res_name) + + def _construct_result(self, result, name): + """ + Construct an appropriately-wrapped result from the ArrayLike result + of an arithmetic-like operation. + """ + raise AbstractMethodError(self) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/common.py b/videollama2/lib/python3.10/site-packages/pandas/core/common.py new file mode 100644 index 0000000000000000000000000000000000000000..9f024498d66edfe9cab7b6fb2e0802da79163786 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/common.py @@ -0,0 +1,657 @@ +""" +Misc tools for implementing data structures + +Note: pandas.core.common is *not* part of the public API. +""" +from __future__ import annotations + +import builtins +from collections import ( + abc, + defaultdict, +) +from collections.abc import ( + Collection, + Generator, + Hashable, + Iterable, + Sequence, +) +import contextlib +from functools import partial +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.compat.numpy import np_version_gte1p24 + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCMultiIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import iterable_not_string + +if TYPE_CHECKING: + from pandas._typing import ( + AnyArrayLike, + ArrayLike, + NpDtype, + RandomState, + T, + ) + + from pandas import Index + + +def flatten(line): + """ + Flatten an arbitrarily nested sequence. + + Parameters + ---------- + line : sequence + The non-string sequence to flatten + + Notes + ----- + This does not treat strings as sequences.
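+ + For example, ``list(flatten([1, [2, (3, 4)], "ab"]))`` gives + ``[1, 2, 3, 4, 'ab']``; the string is yielded whole.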
+ + Returns + ------- + flattened : generator + """ + for element in line: + if iterable_not_string(element): + yield from flatten(element) + else: + yield element + + +def consensus_name_attr(objs): + name = objs[0].name + for obj in objs[1:]: + try: + if obj.name != name: + name = None + except ValueError: + name = None + return name + + +def is_bool_indexer(key: Any) -> bool: + """ + Check whether `key` is a valid boolean indexer. + + Parameters + ---------- + key : Any + Only list-likes may be considered boolean indexers. + All other types are not considered a boolean indexer. + For array-like input, boolean ndarrays or ExtensionArrays + with ``_is_boolean`` set are considered boolean indexers. + + Returns + ------- + bool + Whether `key` is a valid boolean indexer. + + Raises + ------ + ValueError + When the array is an object-dtype ndarray or ExtensionArray + and contains missing values. + + See Also + -------- + check_array_indexer : Check that `key` is a valid array to index, + and convert to an ndarray. + """ + if isinstance( + key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray) + ) and not isinstance(key, ABCMultiIndex): + if key.dtype == np.object_: + key_array = np.asarray(key) + + if not lib.is_bool_array(key_array): + na_msg = "Cannot mask with non-boolean array containing NA / NaN values" + if lib.is_bool_array(key_array, skipna=True): + # Don't raise on e.g. ["A", "B", np.nan], see + # test_loc_getitem_list_of_labels_categoricalindex_with_na + raise ValueError(na_msg) + return False + return True + elif is_bool_dtype(key.dtype): + return True + elif isinstance(key, list): + # check if np.array(key).dtype would be bool + if len(key) > 0: + if type(key) is not list: # noqa: E721 + # GH#42461 cython will raise TypeError if we pass a subclass + key = list(key) + return lib.is_bool_list(key) + + return False + + +def cast_scalar_indexer(val): + """ + Disallow indexing with a float key, even if that key is a round number. + + Parameters + ---------- + val : scalar + + Returns + ------- + outval : scalar + """ + # assumes lib.is_scalar(val) + if lib.is_float(val) and val.is_integer(): + raise IndexError( + # GH#34193 + "Indexing with a float is no longer supported. Manually convert " + "to an integer key instead." + ) + return val + + +def not_none(*args): + """ + Returns a generator consisting of the arguments that are not None. + """ + return (arg for arg in args if arg is not None) + + +def any_none(*args) -> bool: + """ + Returns a boolean indicating if any argument is None. + """ + return any(arg is None for arg in args) + + +def all_none(*args) -> bool: + """ + Returns a boolean indicating if all arguments are None. + """ + return all(arg is None for arg in args) + + +def any_not_none(*args) -> bool: + """ + Returns a boolean indicating if any argument is not None. + """ + return any(arg is not None for arg in args) + + +def all_not_none(*args) -> bool: + """ + Returns a boolean indicating if all arguments are not None. + """ + return all(arg is not None for arg in args) + + +def count_not_none(*args) -> int: + """ + Returns the count of arguments that are not None. + """ + return sum(x is not None for x in args) + + +@overload +def asarray_tuplesafe( + values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ... +) -> np.ndarray: + # ExtensionArray can only be returned when values is an Index, all other iterables + # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type + # signature, so instead we special-case some common types. + ... 
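+ + # For illustration (hypothetical input): asarray_tuplesafe([(1, 2), (3, 4)]) + # returns a 1-D object ndarray holding the two tuples, not a 2x2 array.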
+ + +@overload +def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike: + ... + + +def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike: + if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")): + values = list(values) + elif isinstance(values, ABCIndex): + return values._values + elif isinstance(values, ABCSeries): + return values._values + + if isinstance(values, list) and dtype in [np.object_, object]: + return construct_1d_object_array_from_listlike(values) + + try: + with warnings.catch_warnings(): + # Can remove warning filter once NumPy 1.24 is min version + if not np_version_gte1p24: + warnings.simplefilter("ignore", np.VisibleDeprecationWarning) + result = np.asarray(values, dtype=dtype) + except ValueError: + # Using try/except since it's more performant than checking is_list_like + # over each element + # error: Argument 1 to "construct_1d_object_array_from_listlike" + # has incompatible type "Iterable[Any]"; expected "Sized" + return construct_1d_object_array_from_listlike(values) # type: ignore[arg-type] + + if issubclass(result.dtype.type, str): + result = np.asarray(values, dtype=object) + + if result.ndim == 2: + # Avoid building an array of arrays: + values = [tuple(x) for x in values] + result = construct_1d_object_array_from_listlike(values) + + return result + + +def index_labels_to_array( + labels: np.ndarray | Iterable, dtype: NpDtype | None = None +) -> np.ndarray: + """ + Transform label or iterable of labels to array, for use in Index. + + Parameters + ---------- + dtype : dtype + If specified, use as dtype of the resulting array, otherwise infer. + + Returns + ------- + array + """ + if isinstance(labels, (str, tuple)): + labels = [labels] + + if not isinstance(labels, (list, np.ndarray)): + try: + labels = list(labels) + except TypeError: # non-iterable + labels = [labels] + + labels = asarray_tuplesafe(labels, dtype=dtype) + + return labels + + +def maybe_make_list(obj): + if obj is not None and not isinstance(obj, (tuple, list)): + return [obj] + return obj + + +def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T: + """ + If obj is Iterable but not list-like, consume into list. + """ + if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized): + return list(obj) + obj = cast(Collection, obj) + return obj + + +def is_null_slice(obj) -> bool: + """ + We have a null slice. + """ + return ( + isinstance(obj, slice) + and obj.start is None + and obj.stop is None + and obj.step is None + ) + + +def is_empty_slice(obj) -> bool: + """ + We have an empty slice, e.g. no values are selected. + """ + return ( + isinstance(obj, slice) + and obj.start is not None + and obj.stop is not None + and obj.start == obj.stop + ) + + +def is_true_slices(line) -> list[bool]: + """ + Find non-trivial slices in "line": return a list of booleans with same length. + """ + return [isinstance(k, slice) and not is_null_slice(k) for k in line] + + +# TODO: used only once in indexing; belongs elsewhere? +def is_full_slice(obj, line: int) -> bool: + """ + We have a full length slice. 
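+ + For example, ``is_full_slice(slice(0, 3), 3)`` is True, while + ``is_full_slice(slice(0, 2), 3)`` is not.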
+ """ + return ( + isinstance(obj, slice) + and obj.start == 0 + and obj.stop == line + and obj.step is None + ) + + +def get_callable_name(obj): + # typical case has name + if hasattr(obj, "__name__"): + return getattr(obj, "__name__") + # some objects don't; could recurse + if isinstance(obj, partial): + return get_callable_name(obj.func) + # fall back to class name + if callable(obj): + return type(obj).__name__ + # everything failed (probably because the argument + # wasn't actually callable); we return None + # instead of the empty string in this case to allow + # distinguishing between no name and a name of '' + return None + + +def apply_if_callable(maybe_callable, obj, **kwargs): + """ + Evaluate possibly callable input using obj and kwargs if it is callable, + otherwise return as it is. + + Parameters + ---------- + maybe_callable : possibly a callable + obj : NDFrame + **kwargs + """ + if callable(maybe_callable): + return maybe_callable(obj, **kwargs) + + return maybe_callable + + +def standardize_mapping(into): + """ + Helper function to standardize a supplied mapping. + + Parameters + ---------- + into : instance or subclass of collections.abc.Mapping + Must be a class, an initialized collections.defaultdict, + or an instance of a collections.abc.Mapping subclass. + + Returns + ------- + mapping : a collections.abc.Mapping subclass or other constructor + a callable object that can accept an iterator to create + the desired Mapping. + + See Also + -------- + DataFrame.to_dict + Series.to_dict + """ + if not inspect.isclass(into): + if isinstance(into, defaultdict): + return partial(defaultdict, into.default_factory) + into = type(into) + if not issubclass(into, abc.Mapping): + raise TypeError(f"unsupported type: {into}") + if into == defaultdict: + raise TypeError("to_dict() only accepts initialized defaultdicts") + return into + + +@overload +def random_state(state: np.random.Generator) -> np.random.Generator: + ... + + +@overload +def random_state( + state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None, +) -> np.random.RandomState: + ... + + +def random_state(state: RandomState | None = None): + """ + Helper function for processing random_state arguments. + + Parameters + ---------- + state : int, array-like, BitGenerator, Generator, np.random.RandomState, None. + If receives an int, array-like, or BitGenerator, passes to + np.random.RandomState() as seed. + If receives an np.random RandomState or Generator, just returns that unchanged. + If receives `None`, returns np.random. + If receives anything else, raises an informative ValueError. + + Default None. + + Returns + ------- + np.random.RandomState or np.random.Generator. 
If state is None, returns np.random + + """ + if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)): + return np.random.RandomState(state) + elif isinstance(state, np.random.RandomState): + return state + elif isinstance(state, np.random.Generator): + return state + elif state is None: + return np.random + else: + raise ValueError( + "random_state must be an integer, array-like, a BitGenerator, Generator, " + "a numpy RandomState, or None" + ) + + +def pipe( + obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs +) -> T: + """ + Apply a function ``func`` to object ``obj`` either by passing obj as the + first argument to the function or, in the case that the func is a tuple, + interpret the first element of the tuple as a function and pass the obj to + that function as a keyword argument whose key is the value of the second + element of the tuple. + + Parameters + ---------- + func : callable or tuple of (callable, str) + Function to apply to this object or, alternatively, a + ``(callable, data_keyword)`` tuple where ``data_keyword`` is a + string indicating the keyword of ``callable`` that expects the + object. + *args : iterable, optional + Positional arguments passed into ``func``. + **kwargs : dict, optional + A dictionary of keyword arguments passed into ``func``. + + Returns + ------- + object : the return type of ``func``. + """ + if isinstance(func, tuple): + func, target = func + if target in kwargs: + msg = f"{target} is both the pipe target and a keyword argument" + raise ValueError(msg) + kwargs[target] = obj + return func(*args, **kwargs) + else: + return func(obj, *args, **kwargs) + + +def get_rename_function(mapper): + """ + Returns a function that will map names/labels, depending on whether mapper + is a dict, Series, or just a function. + """ + + def f(x): + if x in mapper: + return mapper[x] + else: + return x + + return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper + + +def convert_to_list_like( + values: Hashable | Iterable | AnyArrayLike, +) -> list | AnyArrayLike: + """ + Convert list-like or scalar input to list-like. List, numpy and pandas array-like + inputs are returned unmodified whereas others are converted to list. + """ + if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)): + return values + elif isinstance(values, abc.Iterable) and not isinstance(values, str): + return list(values) + + return [values] + + +@contextlib.contextmanager +def temp_setattr( + obj, attr: str, value, condition: bool = True +) -> Generator[None, None, None]: + """ + Temporarily set attribute on an object. + + Parameters + ---------- + obj : object + Object whose attribute will be modified. + attr : str + Attribute to modify. + value : Any + Value to temporarily set attribute to. + condition : bool, default True + Whether to set the attribute. Provided in order to not have to + conditionally use this context manager. + + Yields + ------ + object : obj with modified attribute. + """ + if condition: + old_value = getattr(obj, attr) + setattr(obj, attr, value) + try: + yield obj + finally: + if condition: + setattr(obj, attr, old_value) + + +def require_length_match(data, index: Index) -> None: + """ + Check the length of data matches the length of the index.
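+ + For example, ``pd.Series([1, 2], index=["a", "b", "c"])`` fails this + check and raises ``ValueError``.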
+ """ + if len(data) != len(index): + raise ValueError( + "Length of values " + f"({len(data)}) " + "does not match length of index " + f"({len(index)})" + ) + + +# the ufuncs np.maximum.reduce and np.minimum.reduce default to axis=0, +# whereas np.min and np.max (which directly call obj.min and obj.max) +# default to axis=None. +_builtin_table = { + builtins.sum: np.sum, + builtins.max: np.maximum.reduce, + builtins.min: np.minimum.reduce, +} + +# GH#53425: Only for deprecation +_builtin_table_alias = { + builtins.sum: "np.sum", + builtins.max: "np.maximum.reduce", + builtins.min: "np.minimum.reduce", +} + +_cython_table = { + builtins.sum: "sum", + builtins.max: "max", + builtins.min: "min", + np.all: "all", + np.any: "any", + np.sum: "sum", + np.nansum: "sum", + np.mean: "mean", + np.nanmean: "mean", + np.prod: "prod", + np.nanprod: "prod", + np.std: "std", + np.nanstd: "std", + np.var: "var", + np.nanvar: "var", + np.median: "median", + np.nanmedian: "median", + np.max: "max", + np.nanmax: "max", + np.min: "min", + np.nanmin: "min", + np.cumprod: "cumprod", + np.nancumprod: "cumprod", + np.cumsum: "cumsum", + np.nancumsum: "cumsum", +} + + +def get_cython_func(arg: Callable) -> str | None: + """ + if we define an internal function for this argument, return it + """ + return _cython_table.get(arg) + + +def is_builtin_func(arg): + """ + if we define a builtin function for this argument, return it, + otherwise return the arg + """ + return _builtin_table.get(arg, arg) + + +def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]: + """ + If a name is missing then replace it by level_n, where n is the count + + .. versionadded:: 1.4.0 + + Parameters + ---------- + names : list-like + list of column names or None values. + + Returns + ------- + list + list of column names with the None values replaced. + """ + return [f"level_{i}" if name is None else name for i, name in enumerate(names)] diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/config_init.py b/videollama2/lib/python3.10/site-packages/pandas/core/config_init.py new file mode 100644 index 0000000000000000000000000000000000000000..a8b63f97141c22907726b477e1197ffae2a57853 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/config_init.py @@ -0,0 +1,924 @@ +""" +This module is imported from the pandas package __init__.py file +in order to ensure that the core.config options registered here will +be available as soon as the user loads the package. if register_option +is invoked inside specific modules, they will not be registered until that +module is imported, which may or may not be a problem. + +If you need to make sure options are available even before a certain +module is imported, register them here rather than in the module. 
+ +""" +from __future__ import annotations + +import os +from typing import Callable + +import pandas._config.config as cf +from pandas._config.config import ( + is_bool, + is_callable, + is_instance_factory, + is_int, + is_nonnegative_int, + is_one_of_factory, + is_str, + is_text, +) + +# compute + +use_bottleneck_doc = """ +: bool + Use the bottleneck library to accelerate if it is installed, + the default is True + Valid values: False,True +""" + + +def use_bottleneck_cb(key) -> None: + from pandas.core import nanops + + nanops.set_use_bottleneck(cf.get_option(key)) + + +use_numexpr_doc = """ +: bool + Use the numexpr library to accelerate computation if it is installed, + the default is True + Valid values: False,True +""" + + +def use_numexpr_cb(key) -> None: + from pandas.core.computation import expressions + + expressions.set_use_numexpr(cf.get_option(key)) + + +use_numba_doc = """ +: bool + Use the numba engine option for select operations if it is installed, + the default is False + Valid values: False,True +""" + + +def use_numba_cb(key) -> None: + from pandas.core.util import numba_ + + numba_.set_use_numba(cf.get_option(key)) + + +with cf.config_prefix("compute"): + cf.register_option( + "use_bottleneck", + True, + use_bottleneck_doc, + validator=is_bool, + cb=use_bottleneck_cb, + ) + cf.register_option( + "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb + ) + cf.register_option( + "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb + ) +# +# options from the "display" namespace + +pc_precision_doc = """ +: int + Floating point output precision in terms of number of places after the + decimal, for regular formatting as well as scientific notation. Similar + to ``precision`` in :meth:`numpy.set_printoptions`. +""" + +pc_colspace_doc = """ +: int + Default space for DataFrame columns. +""" + +pc_max_rows_doc = """ +: int + If max_rows is exceeded, switch to truncate view. Depending on + `large_repr`, objects are either centrally truncated or printed as + a summary view. 'None' value means unlimited. + + In case python/IPython is running in a terminal and `large_repr` + equals 'truncate' this can be set to 0 and pandas will auto-detect + the height of the terminal and print a truncated object which fits + the screen height. The IPython notebook, IPython qtconsole, or + IDLE do not run in a terminal and hence it is not possible to do + correct auto-detection. +""" + +pc_min_rows_doc = """ +: int + The numbers of rows to show in a truncated view (when `max_rows` is + exceeded). Ignored when `max_rows` is set to None or 0. When set to + None, follows the value of `max_rows`. +""" + +pc_max_cols_doc = """ +: int + If max_cols is exceeded, switch to truncate view. Depending on + `large_repr`, objects are either centrally truncated or printed as + a summary view. 'None' value means unlimited. + + In case python/IPython is running in a terminal and `large_repr` + equals 'truncate' this can be set to 0 or None and pandas will auto-detect + the width of the terminal and print a truncated object which fits + the screen width. The IPython notebook, IPython qtconsole, or IDLE + do not run in a terminal and hence it is not possible to do + correct auto-detection and defaults to 20. +""" + +pc_max_categories_doc = """ +: int + This sets the maximum number of categories pandas should output when + printing out a `Categorical` or a Series of dtype "category". 
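+
+    For example, ``pd.set_option("display.max_categories", 5)`` truncates the
+    printed category listing to 5 entries (illustrative note).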
+"""

+pc_max_info_cols_doc = """
+: int
+    max_info_columns is used in DataFrame.info method to decide if
+    per column information will be printed.
+"""

+pc_nb_repr_h_doc = """
+: boolean
+    When True, IPython notebook will use html representation for
+    pandas objects (if it is available).
+"""

+pc_pprint_nest_depth = """
+: int
+    Controls the number of nested levels to process when pretty-printing
+"""

+pc_multi_sparse_doc = """
+: boolean
+    "sparsify" MultiIndex display (don't display repeated
+    elements in outer levels within groups)
+"""

+float_format_doc = """
+: callable
+    The callable should accept a floating point number and return
+    a string with the desired format of the number. This is used
+    in some places like SeriesFormatter.
+    See formats.format.EngFormatter for an example.
+"""

+max_colwidth_doc = """
+: int or None
+    The maximum width in characters of a column in the repr of
+    a pandas data structure. When the column overflows, a "..."
+    placeholder is embedded in the output. A 'None' value means unlimited.
+"""

+colheader_justify_doc = """
+: 'left'/'right'
+    Controls the justification of column headers. Used by DataFrameFormatter.
+"""

+pc_expand_repr_doc = """
+: boolean
+    Whether to print out the full DataFrame repr for wide DataFrames across
+    multiple lines, `max_columns` is still respected, but the output will
+    wrap-around across multiple "pages" if its width exceeds `display.width`.
+"""

+pc_show_dimensions_doc = """
+: boolean or 'truncate'
+    Whether to print out dimensions at the end of DataFrame repr.
+    If 'truncate' is specified, only print out the dimensions if the
+    frame is truncated (e.g. not display all rows and/or columns)
+"""

+pc_east_asian_width_doc = """
+: boolean
+    Whether to use the Unicode East Asian Width to calculate the display text
+    width.
+    Enabling this may affect the performance (default: False)
+"""

+pc_ambiguous_as_wide_doc = """
+: boolean
+    Whether to handle Unicode characters belonging to Ambiguous as Wide
+    (width=2) (default: False)
+"""

+pc_table_schema_doc = """
+: boolean
+    Whether to publish a Table Schema representation for frontends
+    that support it.
+    (default: False)
+"""

+pc_html_border_doc = """
+: int
+    A ``border=value`` attribute is inserted in the ``<table>`` tag
+    for the DataFrame HTML repr.
+"""

+pc_html_use_mathjax_doc = """\
+: boolean
+    When True, Jupyter notebook will process table contents using MathJax,
+    rendering mathematical expressions enclosed by the dollar symbol.
+    (default: True)
+"""

+pc_max_dir_items = """\
+: int
+    The number of items that will be added to `dir(...)`. 'None' value means
+    unlimited. Because dir is cached, changing this option will not immediately
+    affect already existing dataframes until a column is deleted or added.
+
+    This is for instance used to suggest columns from a dataframe to tab
+    completion.
+"""

+pc_width_doc = """
+: int
+    Width of the display in characters. In case python/IPython is running in
+    a terminal this can be set to None and pandas will correctly auto-detect
+    the width.
+    Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
+    terminal and hence it is not possible to correctly detect the width.
+"""

+pc_chop_threshold_doc = """
+: float or None
+    if set to a float value, all float values smaller than the given threshold
+    will be displayed as exactly 0 by repr and friends.
+"""

+pc_max_seq_items = """
+: int or None
+    When pretty-printing a long sequence, no more than `max_seq_items`
+    will be printed.
If items are omitted, they will be denoted by the + addition of "..." to the resulting string. + + If set to None, the number of items to be printed is unlimited. +""" + +pc_max_info_rows_doc = """ +: int + df.info() will usually show null-counts for each column. + For large frames this can be quite slow. max_info_rows and max_info_cols + limit this null check only to frames with smaller dimensions than + specified. +""" + +pc_large_repr_doc = """ +: 'truncate'/'info' + For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can + show a truncated table, or switch to the view from + df.info() (the behaviour in earlier versions of pandas). +""" + +pc_memory_usage_doc = """ +: bool, string or None + This specifies if the memory usage of a DataFrame should be displayed when + df.info() is called. Valid values True,False,'deep' +""" + + +def table_schema_cb(key) -> None: + from pandas.io.formats.printing import enable_data_resource_formatter + + enable_data_resource_formatter(cf.get_option(key)) + + +def is_terminal() -> bool: + """ + Detect if Python is running in a terminal. + + Returns True if Python is running in a terminal or False if not. + """ + try: + # error: Name 'get_ipython' is not defined + ip = get_ipython() # type: ignore[name-defined] + except NameError: # assume standard Python interpreter in a terminal + return True + else: + if hasattr(ip, "kernel"): # IPython as a Jupyter kernel + return False + else: # IPython in a terminal + return True + + +with cf.config_prefix("display"): + cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) + cf.register_option( + "float_format", + None, + float_format_doc, + validator=is_one_of_factory([None, is_callable]), + ) + cf.register_option( + "max_info_rows", + 1690785, + pc_max_info_rows_doc, + validator=is_int, + ) + cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) + cf.register_option( + "min_rows", + 10, + pc_min_rows_doc, + validator=is_instance_factory([type(None), int]), + ) + cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) + + cf.register_option( + "max_colwidth", + 50, + max_colwidth_doc, + validator=is_nonnegative_int, + ) + if is_terminal(): + max_cols = 0 # automatically determine optimal number of columns + else: + max_cols = 20 # cannot determine optimal number of columns + cf.register_option( + "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int + ) + cf.register_option( + "large_repr", + "truncate", + pc_large_repr_doc, + validator=is_one_of_factory(["truncate", "info"]), + ) + cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int) + cf.register_option( + "colheader_justify", "right", colheader_justify_doc, validator=is_text + ) + cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool) + cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int) + cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool) + cf.register_option("expand_frame_repr", True, pc_expand_repr_doc) + cf.register_option( + "show_dimensions", + "truncate", + pc_show_dimensions_doc, + validator=is_one_of_factory([True, False, "truncate"]), + ) + cf.register_option("chop_threshold", None, pc_chop_threshold_doc) + cf.register_option("max_seq_items", 100, pc_max_seq_items) + cf.register_option( + "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int]) + ) + cf.register_option( + "memory_usage", + True, + 
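+        # Valid values are None, True, False, or "deep" (see validator below);
+        # "deep" interrogates object dtypes for an exact, slower count.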
pc_memory_usage_doc, + validator=is_one_of_factory([None, True, False, "deep"]), + ) + cf.register_option( + "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool + ) + cf.register_option( + "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool + ) + cf.register_option( + "html.table_schema", + False, + pc_table_schema_doc, + validator=is_bool, + cb=table_schema_cb, + ) + cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int) + cf.register_option( + "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool + ) + cf.register_option( + "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int + ) + +tc_sim_interactive_doc = """ +: boolean + Whether to simulate interactive mode for purposes of testing +""" + +with cf.config_prefix("mode"): + cf.register_option("sim_interactive", False, tc_sim_interactive_doc) + +use_inf_as_na_doc = """ +: boolean + True means treat None, NaN, INF, -INF as NA (old way), + False means None and NaN are null, but INF, -INF are not NA + (new way). + + This option is deprecated in pandas 2.1.0 and will be removed in 3.0. +""" + +# We don't want to start importing everything at the global context level +# or we'll hit circular deps. + + +def use_inf_as_na_cb(key) -> None: + # TODO(3.0): enforcing this deprecation will close GH#52501 + from pandas.core.dtypes.missing import _use_inf_as_na + + _use_inf_as_na(key) + + +with cf.config_prefix("mode"): + cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb) + +cf.deprecate_option( + # GH#51684 + "mode.use_inf_as_na", + "use_inf_as_na option is deprecated and will be removed in a future " + "version. Convert inf values to NaN before operating instead.", +) + +data_manager_doc = """ +: string + Internal data manager type; can be "block" or "array". Defaults to "block", + unless overridden by the 'PANDAS_DATA_MANAGER' environment variable (needs + to be set before pandas is imported). +""" + + +with cf.config_prefix("mode"): + cf.register_option( + "data_manager", + # Get the default from an environment variable, if set, otherwise defaults + # to "block". This environment variable can be set for testing. + os.environ.get("PANDAS_DATA_MANAGER", "block"), + data_manager_doc, + validator=is_one_of_factory(["block", "array"]), + ) + +cf.deprecate_option( + # GH#55043 + "mode.data_manager", + "data_manager option is deprecated and will be removed in a future " + "version. Only the BlockManager will be available.", +) + + +# TODO better name? +copy_on_write_doc = """ +: bool + Use new copy-view behaviour using Copy-on-Write. Defaults to False, + unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable + (if set to "1" for True, needs to be set before pandas is imported). +""" + + +with cf.config_prefix("mode"): + cf.register_option( + "copy_on_write", + # Get the default from an environment variable, if set, otherwise defaults + # to False. This environment variable can be set for testing. 
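+        # e.g. PANDAS_COPY_ON_WRITE=1 opts in and PANDAS_COPY_ON_WRITE=warn
+        # selects warning mode, matching the expression below (clarifying comment).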
+ "warn" + if os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "warn" + else os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1", + copy_on_write_doc, + validator=is_one_of_factory([True, False, "warn"]), + ) + + +# user warnings +chained_assignment = """ +: string + Raise an exception, warn, or no action if trying to use chained assignment, + The default is warn +""" + +with cf.config_prefix("mode"): + cf.register_option( + "chained_assignment", + "warn", + chained_assignment, + validator=is_one_of_factory([None, "warn", "raise"]), + ) + + +string_storage_doc = """ +: string + The default storage for StringDtype. This option is ignored if + ``future.infer_string`` is set to True. +""" + +with cf.config_prefix("mode"): + cf.register_option( + "string_storage", + "python", + string_storage_doc, + validator=is_one_of_factory(["python", "pyarrow", "pyarrow_numpy"]), + ) + + +# Set up the io.excel specific reader configuration. +reader_engine_doc = """ +: string + The default Excel reader engine for '{ext}' files. Available options: + auto, {others}. +""" + +_xls_options = ["xlrd", "calamine"] +_xlsm_options = ["xlrd", "openpyxl", "calamine"] +_xlsx_options = ["xlrd", "openpyxl", "calamine"] +_ods_options = ["odf", "calamine"] +_xlsb_options = ["pyxlsb", "calamine"] + + +with cf.config_prefix("io.excel.xls"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)), + validator=is_one_of_factory(_xls_options + ["auto"]), + ) + +with cf.config_prefix("io.excel.xlsm"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), + validator=is_one_of_factory(_xlsm_options + ["auto"]), + ) + + +with cf.config_prefix("io.excel.xlsx"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), + validator=is_one_of_factory(_xlsx_options + ["auto"]), + ) + + +with cf.config_prefix("io.excel.ods"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)), + validator=is_one_of_factory(_ods_options + ["auto"]), + ) + +with cf.config_prefix("io.excel.xlsb"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)), + validator=is_one_of_factory(_xlsb_options + ["auto"]), + ) + +# Set up the io.excel specific writer configuration. +writer_engine_doc = """ +: string + The default Excel writer engine for '{ext}' files. Available options: + auto, {others}. +""" + +_xlsm_options = ["openpyxl"] +_xlsx_options = ["openpyxl", "xlsxwriter"] +_ods_options = ["odf"] + + +with cf.config_prefix("io.excel.xlsm"): + cf.register_option( + "writer", + "auto", + writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), + validator=str, + ) + + +with cf.config_prefix("io.excel.xlsx"): + cf.register_option( + "writer", + "auto", + writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), + validator=str, + ) + + +with cf.config_prefix("io.excel.ods"): + cf.register_option( + "writer", + "auto", + writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)), + validator=str, + ) + + +# Set up the io.parquet specific configuration. +parquet_engine_doc = """ +: string + The default parquet reader/writer engine. 
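+    For example, ``pd.set_option("io.parquet.engine", "pyarrow")`` pins the
+    engine explicitly instead of letting 'auto' pick one (illustrative note).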
Available options: + 'auto', 'pyarrow', 'fastparquet', the default is 'auto' +""" + +with cf.config_prefix("io.parquet"): + cf.register_option( + "engine", + "auto", + parquet_engine_doc, + validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), + ) + + +# Set up the io.sql specific configuration. +sql_engine_doc = """ +: string + The default sql reader/writer engine. Available options: + 'auto', 'sqlalchemy', the default is 'auto' +""" + +with cf.config_prefix("io.sql"): + cf.register_option( + "engine", + "auto", + sql_engine_doc, + validator=is_one_of_factory(["auto", "sqlalchemy"]), + ) + +# -------- +# Plotting +# --------- + +plotting_backend_doc = """ +: str + The plotting backend to use. The default value is "matplotlib", the + backend provided with pandas. Other backends can be specified by + providing the name of the module that implements the backend. +""" + + +def register_plotting_backend_cb(key) -> None: + if key == "matplotlib": + # We defer matplotlib validation, since it's the default + return + from pandas.plotting._core import _get_plot_backend + + _get_plot_backend(key) + + +with cf.config_prefix("plotting"): + cf.register_option( + "backend", + defval="matplotlib", + doc=plotting_backend_doc, + validator=register_plotting_backend_cb, + ) + + +register_converter_doc = """ +: bool or 'auto'. + Whether to register converters with matplotlib's units registry for + dates, times, datetimes, and Periods. Toggling to False will remove + the converters, restoring any converters that pandas overwrote. +""" + + +def register_converter_cb(key) -> None: + from pandas.plotting import ( + deregister_matplotlib_converters, + register_matplotlib_converters, + ) + + if cf.get_option(key): + register_matplotlib_converters() + else: + deregister_matplotlib_converters() + + +with cf.config_prefix("plotting.matplotlib"): + cf.register_option( + "register_converters", + "auto", + register_converter_doc, + validator=is_one_of_factory(["auto", True, False]), + cb=register_converter_cb, + ) + +# ------ +# Styler +# ------ + +styler_sparse_index_doc = """ +: bool + Whether to sparsify the display of a hierarchical index. Setting to False will + display each explicit level element in a hierarchical key for each row. +""" + +styler_sparse_columns_doc = """ +: bool + Whether to sparsify the display of hierarchical columns. Setting to False will + display each explicit level element in a hierarchical key for each column. +""" + +styler_render_repr = """ +: str + Determine which output to use in Jupyter Notebook in {"html", "latex"}. +""" + +styler_max_elements = """ +: int + The maximum number of data-cell (
<td>
) elements that will be rendered before + trimming will occur over columns, rows or both if needed. +""" + +styler_max_rows = """ +: int, optional + The maximum number of rows that will be rendered. May still be reduced to + satisfy ``max_elements``, which takes precedence. +""" + +styler_max_columns = """ +: int, optional + The maximum number of columns that will be rendered. May still be reduced to + satisfy ``max_elements``, which takes precedence. +""" + +styler_precision = """ +: int + The precision for floats and complex numbers. +""" + +styler_decimal = """ +: str + The character representation for the decimal separator for floats and complex. +""" + +styler_thousands = """ +: str, optional + The character representation for thousands separator for floats, int and complex. +""" + +styler_na_rep = """ +: str, optional + The string representation for values identified as missing. +""" + +styler_escape = """ +: str, optional + Whether to escape certain characters according to the given context; html or latex. +""" + +styler_formatter = """ +: str, callable, dict, optional + A formatter object to be used as default within ``Styler.format``. +""" + +styler_multirow_align = """ +: {"c", "t", "b"} + The specifier for vertical alignment of sparsified LaTeX multirows. +""" + +styler_multicol_align = r""" +: {"r", "c", "l", "naive-l", "naive-r"} + The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe + decorators can also be added to non-naive values to draw vertical + rules, e.g. "\|r" will draw a rule on the left side of right aligned merged cells. +""" + +styler_hrules = """ +: bool + Whether to add horizontal rules on top and bottom and below the headers. +""" + +styler_environment = """ +: str + The environment to replace ``\\begin{table}``. If "longtable" is used results + in a specific longtable environment format. +""" + +styler_encoding = """ +: str + The encoding used for output HTML and LaTeX files. +""" + +styler_mathjax = """ +: bool + If False will render special CSS classes to table attributes that indicate Mathjax + will not be used in Jupyter Notebook. 
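+
+    For example, ``pd.set_option("styler.html.mathjax", False)`` turns the
+    MathJax-aware rendering off (illustrative note; the option is registered
+    as ``html.mathjax`` under the ``styler`` prefix below).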
+""" + +with cf.config_prefix("styler"): + cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) + + cf.register_option( + "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool + ) + + cf.register_option( + "render.repr", + "html", + styler_render_repr, + validator=is_one_of_factory(["html", "latex"]), + ) + + cf.register_option( + "render.max_elements", + 2**18, + styler_max_elements, + validator=is_nonnegative_int, + ) + + cf.register_option( + "render.max_rows", + None, + styler_max_rows, + validator=is_nonnegative_int, + ) + + cf.register_option( + "render.max_columns", + None, + styler_max_columns, + validator=is_nonnegative_int, + ) + + cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) + + cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) + + cf.register_option( + "format.precision", 6, styler_precision, validator=is_nonnegative_int + ) + + cf.register_option( + "format.thousands", + None, + styler_thousands, + validator=is_instance_factory([type(None), str]), + ) + + cf.register_option( + "format.na_rep", + None, + styler_na_rep, + validator=is_instance_factory([type(None), str]), + ) + + cf.register_option( + "format.escape", + None, + styler_escape, + validator=is_one_of_factory([None, "html", "latex", "latex-math"]), + ) + + cf.register_option( + "format.formatter", + None, + styler_formatter, + validator=is_instance_factory([type(None), dict, Callable, str]), + ) + + cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) + + cf.register_option( + "latex.multirow_align", + "c", + styler_multirow_align, + validator=is_one_of_factory(["c", "t", "b", "naive"]), + ) + + val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] + val_mca += ["naive-l", "naive-r"] + cf.register_option( + "latex.multicol_align", + "r", + styler_multicol_align, + validator=is_one_of_factory(val_mca), + ) + + cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) + + cf.register_option( + "latex.environment", + None, + styler_environment, + validator=is_instance_factory([type(None), str]), + ) + + +with cf.config_prefix("future"): + cf.register_option( + "infer_string", + False, + "Whether to infer sequence of str objects as pyarrow string " + "dtype, which will be the default in pandas 3.0 " + "(at which point this option will be deprecated).", + validator=is_one_of_factory([True, False]), + ) + + cf.register_option( + "no_silent_downcasting", + False, + "Whether to opt-in to the future behavior which will *not* silently " + "downcast results from Series and DataFrame `where`, `mask`, and `clip` " + "methods. " + "Silent downcasting will be removed in pandas 3.0 " + "(at which point this option will be deprecated).", + validator=is_one_of_factory([True, False]), + ) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/construction.py b/videollama2/lib/python3.10/site-packages/pandas/core/construction.py new file mode 100644 index 0000000000000000000000000000000000000000..f8250ae475a10317c7731c10c475e88cc33f030b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/construction.py @@ -0,0 +1,824 @@ +""" +Constructor functions intended to be shared by pd.array, Series.__init__, +and Index.__new__. + +These should not depend on core.internals. 
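+
+For example, :func:`array` defined here backs the top-level ``pd.array``
+(an illustrative doctest, not upstream text):
+
+>>> pd.array([1, 2, None])
+<IntegerArray>
+[1, 2, <NA>]
+Length: 3, dtype: Int64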
+""" +from __future__ import annotations + +from collections.abc import Sequence +from typing import ( + TYPE_CHECKING, + Optional, + Union, + cast, + overload, +) +import warnings + +import numpy as np +from numpy import ma + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas._libs.tslibs import ( + Period, + get_supported_dtype, + is_supported_dtype, +) +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + Dtype, + DtypeObj, + T, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, + construct_1d_object_array_from_listlike, + maybe_cast_to_datetime, + maybe_cast_to_integer_array, + maybe_convert_platform, + maybe_infer_to_datetimelike, + maybe_promote, +) +from pandas.core.dtypes.common import ( + is_list_like, + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import NumpyEADtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +import pandas.core.common as com + +if TYPE_CHECKING: + from pandas import ( + Index, + Series, + ) + from pandas.core.arrays.base import ExtensionArray + + +def array( + data: Sequence[object] | AnyArrayLike, + dtype: Dtype | None = None, + copy: bool = True, +) -> ExtensionArray: + """ + Create an array. + + Parameters + ---------- + data : Sequence of objects + The scalars inside `data` should be instances of the + scalar type for `dtype`. It's expected that `data` + represents a 1-dimensional array of data. + + When `data` is an Index or Series, the underlying array + will be extracted from `data`. + + dtype : str, np.dtype, or ExtensionDtype, optional + The dtype to use for the array. This may be a NumPy + dtype or an extension type registered with pandas using + :meth:`pandas.api.extensions.register_extension_dtype`. + + If not specified, there are two possibilities: + + 1. When `data` is a :class:`Series`, :class:`Index`, or + :class:`ExtensionArray`, the `dtype` will be taken + from the data. + 2. Otherwise, pandas will attempt to infer the `dtype` + from the data. + + Note that when `data` is a NumPy array, ``data.dtype`` is + *not* used for inferring the array type. This is because + NumPy cannot represent all the types of data that can be + held in extension arrays. + + Currently, pandas will infer an extension dtype for sequences of + + ============================== ======================================= + Scalar Type Array Type + ============================== ======================================= + :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` + :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` + :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` + :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` + :class:`int` :class:`pandas.arrays.IntegerArray` + :class:`float` :class:`pandas.arrays.FloatingArray` + :class:`str` :class:`pandas.arrays.StringArray` or + :class:`pandas.arrays.ArrowStringArray` + :class:`bool` :class:`pandas.arrays.BooleanArray` + ============================== ======================================= + + The ExtensionArray created when the scalar type is :class:`str` is determined by + ``pd.options.mode.string_storage`` if the dtype is not explicitly given. + + For all other cases, NumPy's usual inference rules will be used. 
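+
+        For example (illustrative): ``pd.array([1, 2], dtype="Int64")`` selects
+        the masked :class:`arrays.IntegerArray` directly, while
+        ``dtype=np.dtype("int64")`` yields a NumPy-backed array.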
+    copy : bool, default True
+        Whether to copy the data, even if not necessary. Depending
+        on the type of `data`, creating the new array may require
+        copying data, even if ``copy=False``.
+
+    Returns
+    -------
+    ExtensionArray
+        The newly created array.
+
+    Raises
+    ------
+    ValueError
+        When `data` is not 1-dimensional.
+
+    See Also
+    --------
+    numpy.array : Construct a NumPy array.
+    Series : Construct a pandas Series.
+    Index : Construct a pandas Index.
+    arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array.
+    Series.array : Extract the array stored within a Series.
+
+    Notes
+    -----
+    Omitting the `dtype` argument means pandas will attempt to infer the
+    best array type from the values in the data. As new array types are
+    added by pandas and 3rd party libraries, the "best" array type may
+    change. We recommend specifying `dtype` to ensure that
+
+    1. the correct array type for the data is returned
+    2. the returned array type doesn't change as new extension types
+       are added by pandas and third-party libraries
+
+    Additionally, if the underlying memory representation of the returned
+    array matters, we recommend specifying the `dtype` as a concrete object
+    rather than a string alias or allowing it to be inferred. For example,
+    a future version of pandas or a 3rd-party library may include a
+    dedicated ExtensionArray for string data. In this event, the following
+    would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
+    NumPy array.
+
+    >>> pd.array(['a', 'b'], dtype=str)
+    <NumpyExtensionArray>
+    ['a', 'b']
+    Length: 2, dtype: str32
+
+    This would instead return the new ExtensionArray dedicated for string
+    data. If you really need the new array to be backed by a NumPy array,
+    specify that in the dtype.
+
+    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
+    <NumpyExtensionArray>
+    ['a', 'b']
+    Length: 2, dtype: str32
+
+    Finally, Pandas has arrays that mostly overlap with NumPy
+
+      * :class:`arrays.DatetimeArray`
+      * :class:`arrays.TimedeltaArray`
+
+    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
+    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
+    rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
+    timezone-aware data, which NumPy does not natively support.
+
+    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
+    <DatetimeArray>
+    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
+    Length: 2, dtype: datetime64[ns]
+
+    >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]')
+    <TimedeltaArray>
+    ['0 days 01:00:00', '0 days 02:00:00']
+    Length: 2, dtype: timedelta64[ns]
+
+    Examples
+    --------
+    If a dtype is not specified, pandas will infer the best dtype from the values.
+    See the description of `dtype` for the types pandas infers for.
+
+    >>> pd.array([1, 2])
+    <IntegerArray>
+    [1, 2]
+    Length: 2, dtype: Int64
+
+    >>> pd.array([1, 2, np.nan])
+    <IntegerArray>
+    [1, 2, <NA>]
+    Length: 3, dtype: Int64
+
+    >>> pd.array([1.1, 2.2])
+    <FloatingArray>
+    [1.1, 2.2]
+    Length: 2, dtype: Float64
+
+    >>> pd.array(["a", None, "c"])
+    <StringArray>
+    ['a', <NA>, 'c']
+    Length: 3, dtype: string
+
+    >>> with pd.option_context("string_storage", "pyarrow"):
+    ...     arr = pd.array(["a", None, "c"])
+    ...
+    >>> arr
+    <ArrowStringArray>
+    ['a', <NA>, 'c']
+    Length: 3, dtype: string
+
+    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
+    <PeriodArray>
+    ['2000-01-01', '2000-01-01']
+    Length: 2, dtype: period[D]
+
+    You can use the string alias for `dtype`
+
+    >>> pd.array(['a', 'b', 'a'], dtype='category')
+    ['a', 'b', 'a']
+    Categories (2, object): ['a', 'b']
+
+    Or specify the actual dtype
+
+    >>> pd.array(['a', 'b', 'a'],
+    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
+    ['a', 'b', 'a']
+    Categories (3, object): ['a' < 'b' < 'c']
+
+    If pandas does not infer a dedicated extension type a
+    :class:`arrays.NumpyExtensionArray` is returned.
+
+    >>> pd.array([1 + 1j, 3 + 2j])
+    <NumpyExtensionArray>
+    [(1+1j), (3+2j)]
+    Length: 2, dtype: complex128
+
+    As mentioned in the "Notes" section, new extension types may be added
+    in the future (by pandas or 3rd party libraries), causing the return
+    value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the
+    `dtype` as a NumPy dtype if you need to ensure there's no future change in
+    behavior.
+
+    >>> pd.array([1, 2], dtype=np.dtype("int32"))
+    <NumpyExtensionArray>
+    [1, 2]
+    Length: 2, dtype: int32
+
+    `data` must be 1-dimensional. A ValueError is raised when the input
+    has the wrong dimensionality.
+
+    >>> pd.array(1)
+    Traceback (most recent call last):
+    ...
+    ValueError: Cannot pass scalar '1' to 'pandas.array'.
+    """
+    from pandas.core.arrays import (
+        BooleanArray,
+        DatetimeArray,
+        ExtensionArray,
+        FloatingArray,
+        IntegerArray,
+        IntervalArray,
+        NumpyExtensionArray,
+        PeriodArray,
+        TimedeltaArray,
+    )
+    from pandas.core.arrays.string_ import StringDtype
+
+    if lib.is_scalar(data):
+        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
+        raise ValueError(msg)
+    elif isinstance(data, ABCDataFrame):
+        raise TypeError("Cannot pass DataFrame to 'pandas.array'")
+
+    if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
+        # Note: we exclude np.ndarray here, will do type inference on it
+        dtype = data.dtype
+
+    data = extract_array(data, extract_numpy=True)
+
+    # this returns None for not-found dtypes.
+    if dtype is not None:
+        dtype = pandas_dtype(dtype)
+
+    if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype):
+        # e.g. TimedeltaArray[s], avoid casting to NumpyExtensionArray
+        if copy:
+            return data.copy()
+        return data
+
+    if isinstance(dtype, ExtensionDtype):
+        cls = dtype.construct_array_type()
+        return cls._from_sequence(data, dtype=dtype, copy=copy)
+
+    if dtype is None:
+        inferred_dtype = lib.infer_dtype(data, skipna=True)
+        if inferred_dtype == "period":
+            period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
+            return PeriodArray._from_sequence(period_data, copy=copy)
+
+        elif inferred_dtype == "interval":
+            return IntervalArray(data, copy=copy)
+
+        elif inferred_dtype.startswith("datetime"):
+            # datetime, datetime64
+            try:
+                return DatetimeArray._from_sequence(data, copy=copy)
+            except ValueError:
+                # Mixture of timezones, fall back to NumpyExtensionArray
+                pass
+
+        elif inferred_dtype.startswith("timedelta"):
+            # timedelta, timedelta64
+            return TimedeltaArray._from_sequence(data, copy=copy)
+
+        elif inferred_dtype == "string":
+            # StringArray/ArrowStringArray depending on pd.options.mode.string_storage
+            dtype = StringDtype()
+            cls = dtype.construct_array_type()
+            return cls._from_sequence(data, dtype=dtype, copy=copy)
+
+        elif inferred_dtype == "integer":
+            return IntegerArray._from_sequence(data, copy=copy)
+        elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data):
+            return FloatingArray._from_sequence(data, copy=copy)
+        elif (
+            inferred_dtype in ("floating", "mixed-integer-float")
+            and getattr(data, "dtype", None) != np.float16
+        ):
+            # GH#44715 Exclude np.float16 bc FloatingArray does not support it;
+            # we will fall back to NumpyExtensionArray.
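+            # e.g. a float16 ndarray input falls through to the final
+            # NumpyExtensionArray return below (clarifying comment).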
+ return FloatingArray._from_sequence(data, copy=copy) + + elif inferred_dtype == "boolean": + return BooleanArray._from_sequence(data, dtype="boolean", copy=copy) + + # Pandas overrides NumPy for + # 1. datetime64[ns,us,ms,s] + # 2. timedelta64[ns,us,ms,s] + # so that a DatetimeArray is returned. + if lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype): + return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) + if lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype): + return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) + + elif lib.is_np_dtype(dtype, "mM"): + warnings.warn( + r"datetime64 and timedelta64 dtype resolutions other than " + r"'s', 'ms', 'us', and 'ns' are deprecated. " + r"In future releases passing unsupported resolutions will " + r"raise an exception.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy) + + +_typs = frozenset( + { + "index", + "rangeindex", + "multiindex", + "datetimeindex", + "timedeltaindex", + "periodindex", + "categoricalindex", + "intervalindex", + "series", + } +) + + +@overload +def extract_array( + obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... +) -> ArrayLike: + ... + + +@overload +def extract_array( + obj: T, extract_numpy: bool = ..., extract_range: bool = ... +) -> T | ArrayLike: + ... + + +def extract_array( + obj: T, extract_numpy: bool = False, extract_range: bool = False +) -> T | ArrayLike: + """ + Extract the ndarray or ExtensionArray from a Series or Index. + + For all other types, `obj` is just returned as is. + + Parameters + ---------- + obj : object + For Series / Index, the underlying ExtensionArray is unboxed. + + extract_numpy : bool, default False + Whether to extract the ndarray from a NumpyExtensionArray. + + extract_range : bool, default False + If we have a RangeIndex, return range._values if True + (which is a materialized integer ndarray), otherwise return unchanged. + + Returns + ------- + arr : object + + Examples + -------- + >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) + ['a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + Other objects like lists, arrays, and DataFrames are just passed through. + + >>> extract_array([1, 2, 3]) + [1, 2, 3] + + For an ndarray-backed Series / Index the ndarray is returned. + + >>> extract_array(pd.Series([1, 2, 3])) + array([1, 2, 3]) + + To extract all the way down to the ndarray, pass ``extract_numpy=True``. + + >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) + array([1, 2, 3]) + """ + typ = getattr(obj, "_typ", None) + if typ in _typs: + # i.e. isinstance(obj, (ABCIndex, ABCSeries)) + if typ == "rangeindex": + if extract_range: + # error: "T" has no attribute "_values" + return obj._values # type: ignore[attr-defined] + return obj + + # error: "T" has no attribute "_values" + return obj._values # type: ignore[attr-defined] + + elif extract_numpy and typ == "npy_extension": + # i.e. isinstance(obj, ABCNumpyExtensionArray) + # error: "T" has no attribute "to_numpy" + return obj.to_numpy() # type: ignore[attr-defined] + + return obj + + +def ensure_wrapped_if_datetimelike(arr): + """ + Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray. 
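+
+    A minimal sketch (illustrative doctest, not upstream text; assumes
+    nanosecond input so no resolution conversion occurs):
+
+    >>> ensure_wrapped_if_datetimelike(np.array(["2020-01-01"], dtype="M8[ns]"))
+    <DatetimeArray>
+    ['2020-01-01 00:00:00']
+    Length: 1, dtype: datetime64[ns]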
+ """ + if isinstance(arr, np.ndarray): + if arr.dtype.kind == "M": + from pandas.core.arrays import DatetimeArray + + dtype = get_supported_dtype(arr.dtype) + return DatetimeArray._from_sequence(arr, dtype=dtype) + + elif arr.dtype.kind == "m": + from pandas.core.arrays import TimedeltaArray + + dtype = get_supported_dtype(arr.dtype) + return TimedeltaArray._from_sequence(arr, dtype=dtype) + + return arr + + +def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray: + """ + Convert numpy MaskedArray to ensure mask is softened. + """ + mask = ma.getmaskarray(data) + if mask.any(): + dtype, fill_value = maybe_promote(data.dtype, np.nan) + dtype = cast(np.dtype, dtype) + data = ma.asarray(data.astype(dtype, copy=True)) + data.soften_mask() # set hardmask False if it was True + data[mask] = fill_value + else: + data = data.copy() + return data + + +def sanitize_array( + data, + index: Index | None, + dtype: DtypeObj | None = None, + copy: bool = False, + *, + allow_2d: bool = False, +) -> ArrayLike: + """ + Sanitize input data to an ndarray or ExtensionArray, copy if specified, + coerce to the dtype if specified. + + Parameters + ---------- + data : Any + index : Index or None, default None + dtype : np.dtype, ExtensionDtype, or None, default None + copy : bool, default False + allow_2d : bool, default False + If False, raise if we have a 2D Arraylike. + + Returns + ------- + np.ndarray or ExtensionArray + """ + original_dtype = dtype + if isinstance(data, ma.MaskedArray): + data = sanitize_masked_array(data) + + if isinstance(dtype, NumpyEADtype): + # Avoid ending up with a NumpyExtensionArray + dtype = dtype.numpy_dtype + + object_index = False + if isinstance(data, ABCIndex) and data.dtype == object and dtype is None: + object_index = True + + # extract ndarray or ExtensionArray, ensure we have no NumpyExtensionArray + data = extract_array(data, extract_numpy=True, extract_range=True) + + if isinstance(data, np.ndarray) and data.ndim == 0: + if dtype is None: + dtype = data.dtype + data = lib.item_from_zerodim(data) + elif isinstance(data, range): + # GH#16804 + data = range_to_ndarray(data) + copy = False + + if not is_list_like(data): + if index is None: + raise ValueError("index must be specified when data is not list-like") + if ( + isinstance(data, str) + and using_pyarrow_string_dtype() + and original_dtype is None + ): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype("pyarrow_numpy") + data = construct_1d_arraylike_from_scalar(data, len(index), dtype) + + return data + + elif isinstance(data, ABCExtensionArray): + # it is already ensured above this is not a NumpyExtensionArray + # Until GH#49309 is fixed this check needs to come before the + # ExtensionDtype check + if dtype is not None: + subarr = data.astype(dtype, copy=copy) + elif copy: + subarr = data.copy() + else: + subarr = data + + elif isinstance(dtype, ExtensionDtype): + # create an extension array from its dtype + _sanitize_non_ordered(data) + cls = dtype.construct_array_type() + subarr = cls._from_sequence(data, dtype=dtype, copy=copy) + + # GH#846 + elif isinstance(data, np.ndarray): + if isinstance(data, np.matrix): + data = data.A + + if dtype is None: + subarr = data + if data.dtype == object: + subarr = maybe_infer_to_datetimelike(data) + if ( + object_index + and using_pyarrow_string_dtype() + and is_string_dtype(subarr) + ): + # Avoid inference when string option is set + subarr = data + elif data.dtype.kind == "U" and using_pyarrow_string_dtype(): + from 
pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype(storage="pyarrow_numpy") + subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype) + + if subarr is data and copy: + subarr = subarr.copy() + + else: + # we will try to copy by-definition here + subarr = _try_cast(data, dtype, copy) + + elif hasattr(data, "__array__"): + # e.g. dask array GH#38645 + if not copy: + data = np.asarray(data) + else: + data = np.array(data, copy=copy) + return sanitize_array( + data, + index=index, + dtype=dtype, + copy=False, + allow_2d=allow_2d, + ) + + else: + _sanitize_non_ordered(data) + # materialize e.g. generators, convert e.g. tuples, abc.ValueView + data = list(data) + + if len(data) == 0 and dtype is None: + # We default to float64, matching numpy + subarr = np.array([], dtype=np.float64) + + elif dtype is not None: + subarr = _try_cast(data, dtype, copy) + + else: + subarr = maybe_convert_platform(data) + if subarr.dtype == object: + subarr = cast(np.ndarray, subarr) + subarr = maybe_infer_to_datetimelike(subarr) + + subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d) + + if isinstance(subarr, np.ndarray): + # at this point we should have dtype be None or subarr.dtype == dtype + dtype = cast(np.dtype, dtype) + subarr = _sanitize_str_dtypes(subarr, data, dtype, copy) + + return subarr + + +def range_to_ndarray(rng: range) -> np.ndarray: + """ + Cast a range object to ndarray. + """ + # GH#30171 perf avoid realizing range as a list in np.array + try: + arr = np.arange(rng.start, rng.stop, rng.step, dtype="int64") + except OverflowError: + # GH#30173 handling for ranges that overflow int64 + if (rng.start >= 0 and rng.step > 0) or (rng.step < 0 <= rng.stop): + try: + arr = np.arange(rng.start, rng.stop, rng.step, dtype="uint64") + except OverflowError: + arr = construct_1d_object_array_from_listlike(list(rng)) + else: + arr = construct_1d_object_array_from_listlike(list(rng)) + return arr + + +def _sanitize_non_ordered(data) -> None: + """ + Raise only for unordered sets, e.g., not for dict_keys + """ + if isinstance(data, (set, frozenset)): + raise TypeError(f"'{type(data).__name__}' type is unordered") + + +def _sanitize_ndim( + result: ArrayLike, + data, + dtype: DtypeObj | None, + index: Index | None, + *, + allow_2d: bool = False, +) -> ArrayLike: + """ + Ensure we have a 1-dimensional result array. + """ + if getattr(result, "ndim", 0) == 0: + raise ValueError("result should be arraylike with ndim > 0") + + if result.ndim == 1: + # the result that we want + result = _maybe_repeat(result, index) + + elif result.ndim > 1: + if isinstance(data, np.ndarray): + if allow_2d: + return result + raise ValueError( + f"Data must be 1-dimensional, got ndarray of shape {data.shape} instead" + ) + if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype): + # i.e. NumpyEADtype("O") + + result = com.asarray_tuplesafe(data, dtype=np.dtype("object")) + cls = dtype.construct_array_type() + result = cls._from_sequence(result, dtype=dtype) + else: + # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type + # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str, + # dtype[Any], None]" + result = com.asarray_tuplesafe(data, dtype=dtype) # type: ignore[arg-type] + return result + + +def _sanitize_str_dtypes( + result: np.ndarray, data, dtype: np.dtype | None, copy: bool +) -> np.ndarray: + """ + Ensure we have a dtype that is supported by pandas. 
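+
+    For example (illustrative note): ``np.array([1.0, np.nan], dtype=str)``
+    coerces NaN to the literal string ``'nan'``; this helper re-materializes
+    such mixed inputs as ``object`` dtype instead of a fixed-width string dtype.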
+ """ + + # This is to prevent mixed-type Series getting all casted to + # NumPy string type, e.g. NaN --> '-1#IND'. + if issubclass(result.dtype.type, str): + # GH#16605 + # If not empty convert the data to dtype + # GH#19853: If data is a scalar, result has already the result + if not lib.is_scalar(data): + if not np.all(isna(data)): + data = np.asarray(data, dtype=dtype) + if not copy: + result = np.asarray(data, dtype=object) + else: + result = np.array(data, dtype=object, copy=copy) + return result + + +def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike: + """ + If we have a length-1 array and an index describing how long we expect + the result to be, repeat the array. + """ + if index is not None: + if 1 == len(arr) != len(index): + arr = arr.repeat(len(index)) + return arr + + +def _try_cast( + arr: list | np.ndarray, + dtype: np.dtype, + copy: bool, +) -> ArrayLike: + """ + Convert input to numpy ndarray and optionally cast to a given dtype. + + Parameters + ---------- + arr : ndarray or list + Excludes: ExtensionArray, Series, Index. + dtype : np.dtype + copy : bool + If False, don't copy the data if not needed. + + Returns + ------- + np.ndarray or ExtensionArray + """ + is_ndarray = isinstance(arr, np.ndarray) + + if dtype == object: + if not is_ndarray: + subarr = construct_1d_object_array_from_listlike(arr) + return subarr + return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy) + + elif dtype.kind == "U": + # TODO: test cases with arr.dtype.kind in "mM" + if is_ndarray: + arr = cast(np.ndarray, arr) + shape = arr.shape + if arr.ndim > 1: + arr = arr.ravel() + else: + shape = (len(arr),) + return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape( + shape + ) + + elif dtype.kind in "mM": + return maybe_cast_to_datetime(arr, dtype) + + # GH#15832: Check if we are requesting a numeric dtype and + # that we can convert the data to the requested dtype. + elif dtype.kind in "iu": + # this will raise if we have e.g. floats + + subarr = maybe_cast_to_integer_array(arr, dtype) + elif not copy: + subarr = np.asarray(arr, dtype=dtype) + else: + subarr = np.array(arr, dtype=dtype, copy=copy) + + return subarr diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/flags.py b/videollama2/lib/python3.10/site-packages/pandas/core/flags.py new file mode 100644 index 0000000000000000000000000000000000000000..aff7a15f283bafe1459173070e64df4caef0d45d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/flags.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +import weakref + +if TYPE_CHECKING: + from pandas.core.generic import NDFrame + + +class Flags: + """ + Flags that apply to pandas objects. + + Parameters + ---------- + obj : Series or DataFrame + The object these flags are associated with. + allows_duplicate_labels : bool, default True + Whether to allow duplicate labels in this object. By default, + duplicate labels are permitted. Setting this to ``False`` will + cause an :class:`errors.DuplicateLabelError` to be raised when + `index` (or columns for DataFrame) is not unique, or any + subsequent operation on introduces duplicates. + See :ref:`duplicates.disallow` for more. + + .. warning:: + + This is an experimental feature. Currently, many methods fail to + propagate the ``allows_duplicate_labels`` value. In future versions + it is expected that every method taking or returning one or more + DataFrame or Series objects will propagate ``allows_duplicate_labels``. 
+
+    Examples
+    --------
+    Attributes can be set in two ways:
+
+    >>> df = pd.DataFrame()
+    >>> df.flags
+    <Flags(allows_duplicate_labels=True)>
+    >>> df.flags.allows_duplicate_labels = False
+    >>> df.flags
+    <Flags(allows_duplicate_labels=False)>
+
+    >>> df.flags['allows_duplicate_labels'] = True
+    >>> df.flags
+    <Flags(allows_duplicate_labels=True)>
+    """
+
+    _keys: set[str] = {"allows_duplicate_labels"}
+
+    def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
+        self._allows_duplicate_labels = allows_duplicate_labels
+        self._obj = weakref.ref(obj)
+
+    @property
+    def allows_duplicate_labels(self) -> bool:
+        """
+        Whether this object allows duplicate labels.
+
+        Setting ``allows_duplicate_labels=False`` ensures that the
+        index (and columns of a DataFrame) are unique. Most methods
+        that accept and return a Series or DataFrame will propagate
+        the value of ``allows_duplicate_labels``.
+
+        See :ref:`duplicates` for more.
+
+        See Also
+        --------
+        DataFrame.attrs : Set global metadata on this object.
+        DataFrame.set_flags : Set global flags on this object.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
+        >>> df.flags.allows_duplicate_labels
+        True
+        >>> df.flags.allows_duplicate_labels = False
+        Traceback (most recent call last):
+            ...
+        pandas.errors.DuplicateLabelError: Index has duplicates.
+              positions
+        label
+        a        [0, 1]
+        """
+        return self._allows_duplicate_labels
+
+    @allows_duplicate_labels.setter
+    def allows_duplicate_labels(self, value: bool) -> None:
+        value = bool(value)
+        obj = self._obj()
+        if obj is None:
+            raise ValueError("This flag's object has been deleted.")
+
+        if not value:
+            for ax in obj.axes:
+                ax._maybe_check_unique()
+
+        self._allows_duplicate_labels = value
+
+    def __getitem__(self, key: str):
+        if key not in self._keys:
+            raise KeyError(key)
+
+        return getattr(self, key)
+
+    def __setitem__(self, key: str, value) -> None:
+        if key not in self._keys:
+            raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
+        setattr(self, key, value)
+
+    def __repr__(self) -> str:
+        return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"
+
+    def __eq__(self, other) -> bool:
+        if isinstance(other, type(self)):
+            return self.allows_duplicate_labels == other.allows_duplicate_labels
+        return False
diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/frame.py b/videollama2/lib/python3.10/site-packages/pandas/core/frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..afcd4d014316e34cd06db90fbea345a2ce0e9daa
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/pandas/core/frame.py
@@ -0,0 +1,12697 @@
+"""
+DataFrame
+---------
+An efficient 2D container for potentially mixed-type time series or other
+labeled data series.
+ +Similar to its R counterpart, data.frame, except providing automatic data +alignment and a host of useful data manipulation methods having to do with the +labeling information +""" +from __future__ import annotations + +import collections +from collections import abc +from collections.abc import ( + Hashable, + Iterable, + Iterator, + Mapping, + Sequence, +) +import functools +from inspect import signature +from io import StringIO +import itertools +import operator +import sys +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + overload, +) +import warnings + +import numpy as np +from numpy import ma + +from pandas._config import ( + get_option, + using_copy_on_write, + warn_copy_on_write, +) +from pandas._config.config import _get_option + +from pandas._libs import ( + algos as libalgos, + lib, + properties, +) +from pandas._libs.hashtable import duplicated +from pandas._libs.lib import is_range_indexer +from pandas.compat import PYPY +from pandas.compat._constants import REF_COUNT +from pandas.compat._optional import import_optional_dependency +from pandas.compat.numpy import function as nv +from pandas.errors import ( + ChainedAssignmentError, + InvalidIndexError, + _chained_assignment_method_msg, + _chained_assignment_msg, + _chained_assignment_warning_method_msg, + _chained_assignment_warning_msg, +) +from pandas.util._decorators import ( + Appender, + Substitution, + deprecate_nonkeyword_arguments, + doc, +) +from pandas.util._exceptions import ( + find_stack_level, + rewrite_warning, +) +from pandas.util._validators import ( + validate_ascending, + validate_bool_kwarg, + validate_percentile, +) + +from pandas.core.dtypes.cast import ( + LossySetitemError, + can_hold_element, + construct_1d_arraylike_from_scalar, + construct_2d_arraylike_from_scalar, + find_common_type, + infer_dtype_from_scalar, + invalidate_string_dtypes, + maybe_box_native, + maybe_downcast_to_dtype, +) +from pandas.core.dtypes.common import ( + infer_dtype_from_object, + is_1d_only_ea_dtype, + is_array_like, + is_bool_dtype, + is_dataclass, + is_dict_like, + is_float, + is_float_dtype, + is_hashable, + is_integer, + is_integer_dtype, + is_iterator, + is_list_like, + is_scalar, + is_sequence, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + BaseMaskedDtype, + ExtensionDtype, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import ( + algorithms, + common as com, + nanops, + ops, + roperator, +) +from pandas.core.accessor import CachedAccessor +from pandas.core.apply import reconstruct_and_relabel_result +from pandas.core.array_algos.take import take_2d_multi +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays import ( + BaseMaskedArray, + DatetimeArray, + ExtensionArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.sparse import SparseFrameAccessor +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + sanitize_array, + sanitize_masked_array, +) +from pandas.core.generic import ( + NDFrame, + make_doc, +) +from pandas.core.indexers import check_key_length +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + PeriodIndex, + default_index, + ensure_index, + ensure_index_from_sequences, +) +from pandas.core.indexes.multi import ( + MultiIndex, + maybe_droplevels, +) +from pandas.core.indexing import ( + check_bool_indexer, + check_dict_or_set_indexers, +) 
+from pandas.core.internals import ( + ArrayManager, + BlockManager, +) +from pandas.core.internals.construction import ( + arrays_to_mgr, + dataclasses_to_dicts, + dict_to_mgr, + mgr_to_mgr, + ndarray_to_mgr, + nested_data_to_arrays, + rec_array_to_mgr, + reorder_arrays, + to_arrays, + treat_as_nested, +) +from pandas.core.methods import selectn +from pandas.core.reshape.melt import melt +from pandas.core.series import Series +from pandas.core.shared_docs import _shared_docs +from pandas.core.sorting import ( + get_group_index, + lexsort_indexer, + nargsort, +) + +from pandas.io.common import get_handle +from pandas.io.formats import ( + console, + format as fmt, +) +from pandas.io.formats.info import ( + INFO_DOCSTRING, + DataFrameInfo, + frame_sub_kwargs, +) +import pandas.plotting + +if TYPE_CHECKING: + import datetime + + from pandas._libs.internals import BlockValuesRefs + from pandas._typing import ( + AggFuncType, + AnyAll, + AnyArrayLike, + ArrayLike, + Axes, + Axis, + AxisInt, + ColspaceArgType, + CompressionOptions, + CorrelationMethod, + DropKeep, + Dtype, + DtypeObj, + FilePath, + FloatFormatType, + FormattersType, + Frequency, + FromDictOrient, + IgnoreRaise, + IndexKeyFunc, + IndexLabel, + JoinValidate, + Level, + MergeHow, + MergeValidate, + MutableMappingT, + NaAction, + NaPosition, + NsmallestNlargestKeep, + PythonFuncType, + QuantileInterpolation, + ReadBuffer, + ReindexMethod, + Renamer, + Scalar, + Self, + SequenceNotStr, + SortKind, + StorageOptions, + Suffixes, + ToGbqIfexist, + ToStataByteorder, + ToTimestampHow, + UpdateJoin, + ValueKeyFunc, + WriteBuffer, + XMLParsers, + npt, + ) + + from pandas.core.groupby.generic import DataFrameGroupBy + from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg + from pandas.core.internals import SingleDataManager + + from pandas.io.formats.style import Styler + +# --------------------------------------------------------------------- +# Docstring templates + +_shared_doc_kwargs = { + "axes": "index, columns", + "klass": "DataFrame", + "axes_single_arg": "{0 or 'index', 1 or 'columns'}", + "axis": """axis : {0 or 'index', 1 or 'columns'}, default 0 + If 0 or 'index': apply function to each column. + If 1 or 'columns': apply function to each row.""", + "inplace": """ + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one.""", + "optional_by": """ +by : str or list of str + Name or list of names to sort by. + + - if `axis` is 0 or `'index'` then `by` may contain index + levels and/or column labels. + - if `axis` is 1 or `'columns'` then `by` may contain column + levels and/or index labels.""", + "optional_reindex": """ +labels : array-like, optional + New labels / index to conform the axis specified by 'axis' to. +index : array-like, optional + New labels for the index. Preferably an Index object to avoid + duplicating data. +columns : array-like, optional + New labels for the columns. Preferably an Index object to avoid + duplicating data. +axis : int or str, optional + Axis to target. Can be either the axis name ('index', 'columns') + or number (0, 1).""", +} + +_merge_doc = """ +Merge DataFrame or named Series objects with a database-style join. + +A named Series object is treated as a DataFrame with a single named column. + +The join is done on columns or indexes. If joining columns on +columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes +on indexes or indexes on a column or columns, the index will be passed on. 
+When performing a cross merge, no column specifications to merge on are
+allowed.
+
+.. warning::
+
+    If both key columns contain rows where the key is a null value, those
+    rows will be matched against each other. This is different from usual SQL
+    join behaviour and can lead to unexpected results.
+
+Parameters
+----------%s
+right : DataFrame or named Series
+    Object to merge with.
+how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
+    Type of merge to be performed.
+
+    * left: use only keys from left frame, similar to a SQL left outer join;
+      preserve key order.
+    * right: use only keys from right frame, similar to a SQL right outer join;
+      preserve key order.
+    * outer: use union of keys from both frames, similar to a SQL full outer
+      join; sort keys lexicographically.
+    * inner: use intersection of keys from both frames, similar to a SQL inner
+      join; preserve the order of the left keys.
+    * cross: creates the cartesian product from both frames, preserves the order
+      of the left keys.
+on : label or list
+    Column or index level names to join on. These must be found in both
+    DataFrames. If `on` is None and not merging on indexes then this defaults
+    to the intersection of the columns in both DataFrames.
+left_on : label or list, or array-like
+    Column or index level names to join on in the left DataFrame. Can also
+    be an array or list of arrays of the length of the left DataFrame.
+    These arrays are treated as if they are columns.
+right_on : label or list, or array-like
+    Column or index level names to join on in the right DataFrame. Can also
+    be an array or list of arrays of the length of the right DataFrame.
+    These arrays are treated as if they are columns.
+left_index : bool, default False
+    Use the index from the left DataFrame as the join key(s). If it is a
+    MultiIndex, the number of keys in the other DataFrame (either the index
+    or a number of columns) must match the number of levels.
+right_index : bool, default False
+    Use the index from the right DataFrame as the join key. Same caveats as
+    left_index.
+sort : bool, default False
+    Sort the join keys lexicographically in the result DataFrame. If False,
+    the order of the join keys depends on the join type (how keyword).
+suffixes : list-like, default is ("_x", "_y")
+    A length-2 sequence where each element is optionally a string
+    indicating the suffix to add to overlapping column names in
+    `left` and `right` respectively. Pass a value of `None` instead
+    of a string to indicate that the column name from `left` or
+    `right` should be left as-is, with no suffix. At least one of the
+    values must not be None.
+copy : bool, default True
+    If False, avoid copy if possible.
+
+    .. note::
+        The `copy` keyword will change behavior in pandas 3.0.
+        `Copy-on-Write
+        <https://pandas.pydata.org/docs/dev/copy_on_write.html>`__
+        will be enabled by default, which means that all methods with a
+        `copy` keyword will use a lazy copy mechanism to defer the copy and
+        ignore the `copy` keyword. The `copy` keyword will be removed in a
+        future version of pandas.
+
+        You can already get the future behavior and improvements through
+        enabling copy on write ``pd.options.mode.copy_on_write = True``
+indicator : bool or str, default False
+    If True, adds a column to the output DataFrame called "_merge" with
+    information on the source of each row. The column can be given a different
+    name by providing a string argument.
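+    For instance, ``indicator="source"`` names the column "source" instead
+    of "_merge".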
The column will have a Categorical + type with the value of "left_only" for observations whose merge key only + appears in the left DataFrame, "right_only" for observations + whose merge key only appears in the right DataFrame, and "both" + if the observation's merge key is found in both DataFrames. + +validate : str, optional + If specified, checks if merge is of specified type. + + * "one_to_one" or "1:1": check if merge keys are unique in both + left and right datasets. + * "one_to_many" or "1:m": check if merge keys are unique in left + dataset. + * "many_to_one" or "m:1": check if merge keys are unique in right + dataset. + * "many_to_many" or "m:m": allowed, but does not result in checks. + +Returns +------- +DataFrame + A DataFrame of the two merged objects. + +See Also +-------- +merge_ordered : Merge with optional filling/interpolation. +merge_asof : Merge on nearest keys. +DataFrame.join : Similar method using indices. + +Examples +-------- +>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], +... 'value': [1, 2, 3, 5]}) +>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], +... 'value': [5, 6, 7, 8]}) +>>> df1 + lkey value +0 foo 1 +1 bar 2 +2 baz 3 +3 foo 5 +>>> df2 + rkey value +0 foo 5 +1 bar 6 +2 baz 7 +3 foo 8 + +Merge df1 and df2 on the lkey and rkey columns. The value columns have +the default suffixes, _x and _y, appended. + +>>> df1.merge(df2, left_on='lkey', right_on='rkey') + lkey value_x rkey value_y +0 foo 1 foo 5 +1 foo 1 foo 8 +2 bar 2 bar 6 +3 baz 3 baz 7 +4 foo 5 foo 5 +5 foo 5 foo 8 + +Merge DataFrames df1 and df2 with specified left and right suffixes +appended to any overlapping columns. + +>>> df1.merge(df2, left_on='lkey', right_on='rkey', +... suffixes=('_left', '_right')) + lkey value_left rkey value_right +0 foo 1 foo 5 +1 foo 1 foo 8 +2 bar 2 bar 6 +3 baz 3 baz 7 +4 foo 5 foo 5 +5 foo 5 foo 8 + +Merge DataFrames df1 and df2, but raise an exception if the DataFrames have +any overlapping columns. + +>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) +Traceback (most recent call last): +... +ValueError: columns overlap but no suffix specified: + Index(['value'], dtype='object') + +>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]}) +>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]}) +>>> df1 + a b +0 foo 1 +1 bar 2 +>>> df2 + a c +0 foo 3 +1 baz 4 + +>>> df1.merge(df2, how='inner', on='a') + a b c +0 foo 1 3 + +>>> df1.merge(df2, how='left', on='a') + a b c +0 foo 1 3.0 +1 bar 2 NaN + +>>> df1 = pd.DataFrame({'left': ['foo', 'bar']}) +>>> df2 = pd.DataFrame({'right': [7, 8]}) +>>> df1 + left +0 foo +1 bar +>>> df2 + right +0 7 +1 8 + +>>> df1.merge(df2, how='cross') + left right +0 foo 7 +1 foo 8 +2 bar 7 +3 bar 8 +""" + + +# ----------------------------------------------------------------------- +# DataFrame class + + +class DataFrame(NDFrame, OpsMixin): + """ + Two-dimensional, size-mutable, potentially heterogeneous tabular data. + + Data structure also contains labeled axes (rows and columns). + Arithmetic operations align on both row and column labels. Can be + thought of as a dict-like container for Series objects. The primary + pandas data structure. + + Parameters + ---------- + data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame + Dict can contain Series, arrays, constants, dataclass or list-like objects. If + data is a dict, column order follows insertion-order. If a dict contains Series + which have an index defined, it is aligned by its index. 
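+        For example, ``pd.DataFrame({"a": pd.Series([1, 2], index=["x", "y"])})``
+        yields rows labeled "x" and "y".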
This alignment also
+        occurs if data is a Series or a DataFrame itself. Alignment is done on
+        Series/DataFrame inputs.
+
+        If data is a list of dicts, column order follows insertion-order.
+
+    index : Index or array-like
+        Index to use for resulting frame. Will default to RangeIndex if
+        no indexing information is part of the input data and no index is
+        provided.
+    columns : Index or array-like
+        Column labels to use for resulting frame when data does not have them,
+        defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels,
+        will perform column selection instead.
+    dtype : dtype, default None
+        Data type to force. Only a single dtype is allowed. If None, infer.
+    copy : bool or None, default None
+        Copy data from inputs.
+        For dict data, the default of None behaves like ``copy=True``. For DataFrame
+        or 2d ndarray input, the default of None behaves like ``copy=False``.
+        If data is a dict containing one or more Series (possibly of different dtypes),
+        ``copy=False`` will ensure that these inputs are not copied.
+
+        .. versionchanged:: 1.3.0
+
+    See Also
+    --------
+    DataFrame.from_records : Constructor from tuples, also record arrays.
+    DataFrame.from_dict : From dicts of Series, arrays, or dicts.
+    read_csv : Read a comma-separated values (csv) file into DataFrame.
+    read_table : Read general delimited file into DataFrame.
+    read_clipboard : Read text from clipboard into DataFrame.
+
+    Notes
+    -----
+    Please reference the :ref:`User Guide <basics.dataframe>` for more information.
+
+    Examples
+    --------
+    Constructing DataFrame from a dictionary.
+
+    >>> d = {'col1': [1, 2], 'col2': [3, 4]}
+    >>> df = pd.DataFrame(data=d)
+    >>> df
+       col1  col2
+    0     1     3
+    1     2     4
+
+    Notice that the inferred dtype is int64.
+
+    >>> df.dtypes
+    col1    int64
+    col2    int64
+    dtype: object
+
+    To enforce a single dtype:
+
+    >>> df = pd.DataFrame(data=d, dtype=np.int8)
+    >>> df.dtypes
+    col1    int8
+    col2    int8
+    dtype: object
+
+    Constructing DataFrame from a dictionary including Series:
+
+    >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])}
+    >>> pd.DataFrame(data=d, index=[0, 1, 2, 3])
+       col1  col2
+    0     0   NaN
+    1     1   NaN
+    2     2   2.0
+    3     3   3.0
+
+    Constructing DataFrame from numpy ndarray:
+
+    >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
+    ...                    columns=['a', 'b', 'c'])
+    >>> df2
+       a  b  c
+    0  1  2  3
+    1  4  5  6
+    2  7  8  9
+
+    Constructing DataFrame from a numpy ndarray that has labeled columns:
+
+    >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
+    ...                 dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")])
+    >>> df3 = pd.DataFrame(data, columns=['c', 'a'])
+    ...
+ >>> df3 + c a + 0 3 1 + 1 6 4 + 2 9 7 + + Constructing DataFrame from dataclass: + + >>> from dataclasses import make_dataclass + >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) + >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) + x y + 0 0 0 + 1 0 3 + 2 2 3 + + Constructing DataFrame from Series/DataFrame: + + >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) + >>> df = pd.DataFrame(data=ser, index=["a", "c"]) + >>> df + 0 + a 1 + c 3 + + >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) + >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) + >>> df2 + x + a 1 + c 3 + """ + + _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set + _typ = "dataframe" + _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) + _accessors: set[str] = {"sparse"} + _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) + _mgr: BlockManager | ArrayManager + + # similar to __array_priority__, positions DataFrame before Series, Index, + # and ExtensionArray. Should NOT be overridden by subclasses. + __pandas_priority__ = 4000 + + @property + def _constructor(self) -> Callable[..., DataFrame]: + return DataFrame + + def _constructor_from_mgr(self, mgr, axes) -> DataFrame: + df = DataFrame._from_mgr(mgr, axes=axes) + + if type(self) is DataFrame: + # This would also work `if self._constructor is DataFrame`, but + # this check is slightly faster, benefiting the most-common case. + return df + + elif type(self).__name__ == "GeoDataFrame": + # Shim until geopandas can override their _constructor_from_mgr + # bc they have different behavior for Managers than for DataFrames + return self._constructor(mgr) + + # We assume that the subclass __init__ knows how to handle a + # pd.DataFrame object. + return self._constructor(df) + + _constructor_sliced: Callable[..., Series] = Series + + def _constructor_sliced_from_mgr(self, mgr, axes) -> Series: + ser = Series._from_mgr(mgr, axes) + ser._name = None # caller is responsible for setting real name + + if type(self) is DataFrame: + # This would also work `if self._constructor_sliced is Series`, but + # this check is slightly faster, benefiting the most-common case. + return ser + + # We assume that the subclass __init__ knows how to handle a + # pd.Series object. + return self._constructor_sliced(ser) + + # ---------------------------------------------------------------------- + # Constructors + + def __init__( + self, + data=None, + index: Axes | None = None, + columns: Axes | None = None, + dtype: Dtype | None = None, + copy: bool | None = None, + ) -> None: + allow_mgr = False + if dtype is not None: + dtype = self._validate_dtype(dtype) + + if isinstance(data, DataFrame): + data = data._mgr + allow_mgr = True + if not copy: + # if not copying data, ensure to still return a shallow copy + # to avoid the result sharing the same Manager + data = data.copy(deep=False) + + if isinstance(data, (BlockManager, ArrayManager)): + if not allow_mgr: + # GH#52419 + warnings.warn( + f"Passing a {type(data).__name__} to {type(self).__name__} " + "is deprecated and will raise in a future version. 
" + "Use public APIs instead.", + DeprecationWarning, + stacklevel=1, # bump to 2 once pyarrow 15.0 is released with fix + ) + + if using_copy_on_write(): + data = data.copy(deep=False) + # first check if a Manager is passed without any other arguments + # -> use fastpath (without checking Manager type) + if index is None and columns is None and dtype is None and not copy: + # GH#33357 fastpath + NDFrame.__init__(self, data) + return + + manager = _get_option("mode.data_manager", silent=True) + + is_pandas_object = isinstance(data, (Series, Index, ExtensionArray)) + data_dtype = getattr(data, "dtype", None) + original_dtype = dtype + + # GH47215 + if isinstance(index, set): + raise ValueError("index cannot be a set") + if isinstance(columns, set): + raise ValueError("columns cannot be a set") + + if copy is None: + if isinstance(data, dict): + # retain pre-GH#38939 default behavior + copy = True + elif ( + manager == "array" + and isinstance(data, (np.ndarray, ExtensionArray)) + and data.ndim == 2 + ): + # INFO(ArrayManager) by default copy the 2D input array to get + # contiguous 1D arrays + copy = True + elif using_copy_on_write() and not isinstance( + data, (Index, DataFrame, Series) + ): + copy = True + else: + copy = False + + if data is None: + index = index if index is not None else default_index(0) + columns = columns if columns is not None else default_index(0) + dtype = dtype if dtype is not None else pandas_dtype(object) + data = [] + + if isinstance(data, (BlockManager, ArrayManager)): + mgr = self._init_mgr( + data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy + ) + + elif isinstance(data, dict): + # GH#38939 de facto copy defaults to False only in non-dict cases + mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) + elif isinstance(data, ma.MaskedArray): + from numpy.ma import mrecords + + # masked recarray + if isinstance(data, mrecords.MaskedRecords): + raise TypeError( + "MaskedRecords are not supported. Pass " + "{name: data[name] for name in data.dtype.names} " + "instead" + ) + + # a masked array + data = sanitize_masked_array(data) + mgr = ndarray_to_mgr( + data, + index, + columns, + dtype=dtype, + copy=copy, + typ=manager, + ) + + elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): + if data.dtype.names: + # i.e. numpy structured array + data = cast(np.ndarray, data) + mgr = rec_array_to_mgr( + data, + index, + columns, + dtype, + copy, + typ=manager, + ) + elif getattr(data, "name", None) is not None: + # i.e. Series/Index with non-None name + _copy = copy if using_copy_on_write() else True + mgr = dict_to_mgr( + # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no + # attribute "name" + {data.name: data}, # type: ignore[union-attr] + index, + columns, + dtype=dtype, + typ=manager, + copy=_copy, + ) + else: + mgr = ndarray_to_mgr( + data, + index, + columns, + dtype=dtype, + copy=copy, + typ=manager, + ) + + # For data is list-like, or Iterable (will consume into list) + elif is_list_like(data): + if not isinstance(data, abc.Sequence): + if hasattr(data, "__array__"): + # GH#44616 big perf improvement for e.g. 
pytorch tensor + data = np.asarray(data) + else: + data = list(data) + if len(data) > 0: + if is_dataclass(data[0]): + data = dataclasses_to_dicts(data) + if not isinstance(data, np.ndarray) and treat_as_nested(data): + # exclude ndarray as we may have cast it a few lines above + if columns is not None: + columns = ensure_index(columns) + arrays, columns, index = nested_data_to_arrays( + # error: Argument 3 to "nested_data_to_arrays" has incompatible + # type "Optional[Collection[Any]]"; expected "Optional[Index]" + data, + columns, + index, # type: ignore[arg-type] + dtype, + ) + mgr = arrays_to_mgr( + arrays, + columns, + index, + dtype=dtype, + typ=manager, + ) + else: + mgr = ndarray_to_mgr( + data, + index, + columns, + dtype=dtype, + copy=copy, + typ=manager, + ) + else: + mgr = dict_to_mgr( + {}, + index, + columns if columns is not None else default_index(0), + dtype=dtype, + typ=manager, + ) + # For data is scalar + else: + if index is None or columns is None: + raise ValueError("DataFrame constructor not properly called!") + + index = ensure_index(index) + columns = ensure_index(columns) + + if not dtype: + dtype, _ = infer_dtype_from_scalar(data) + + # For data is a scalar extension dtype + if isinstance(dtype, ExtensionDtype): + # TODO(EA2D): special case not needed with 2D EAs + + values = [ + construct_1d_arraylike_from_scalar(data, len(index), dtype) + for _ in range(len(columns)) + ] + mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) + else: + arr2d = construct_2d_arraylike_from_scalar( + data, + len(index), + len(columns), + dtype, + copy, + ) + + mgr = ndarray_to_mgr( + arr2d, + index, + columns, + dtype=arr2d.dtype, + copy=False, + typ=manager, + ) + + # ensure correct Manager type according to settings + mgr = mgr_to_mgr(mgr, typ=manager) + + NDFrame.__init__(self, mgr) + + if original_dtype is None and is_pandas_object and data_dtype == np.object_: + if self.dtypes.iloc[0] != data_dtype: + warnings.warn( + "Dtype inference on a pandas object " + "(Series, Index, ExtensionArray) is deprecated. The DataFrame " + "constructor will keep the original dtype in the future. " + "Call `infer_objects` on the result to get the old " + "behavior.", + FutureWarning, + stacklevel=2, + ) + + # ---------------------------------------------------------------------- + + def __dataframe__( + self, nan_as_null: bool = False, allow_copy: bool = True + ) -> DataFrameXchg: + """ + Return the dataframe interchange object implementing the interchange protocol. + + Parameters + ---------- + nan_as_null : bool, default False + `nan_as_null` is DEPRECATED and has no effect. Please avoid using + it; it will be removed in a future release. + allow_copy : bool, default True + Whether to allow memory copying when exporting. If set to False + it would cause non-zero-copy exports to fail. + + Returns + ------- + DataFrame interchange object + The object which consuming library can use to ingress the dataframe. + + Notes + ----- + Details on the interchange protocol: + https://data-apis.org/dataframe-protocol/latest/index.html + + Examples + -------- + >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> interchange_object = df_not_necessarily_pandas.__dataframe__() + >>> interchange_object.column_names() + Index(['A', 'B'], dtype='object') + >>> df_pandas = (pd.api.interchange.from_dataframe + ... 
(interchange_object.select_columns_by_name(['A']))) + >>> df_pandas + A + 0 1 + 1 2 + + These methods (``column_names``, ``select_columns_by_name``) should work + for any dataframe library which implements the interchange protocol. + """ + + from pandas.core.interchange.dataframe import PandasDataFrameXchg + + return PandasDataFrameXchg(self, allow_copy=allow_copy) + + def __dataframe_consortium_standard__( + self, *, api_version: str | None = None + ) -> Any: + """ + Provide entry point to the Consortium DataFrame Standard API. + + This is developed and maintained outside of pandas. + Please report any issues to https://github.com/data-apis/dataframe-api-compat. + """ + dataframe_api_compat = import_optional_dependency("dataframe_api_compat") + convert_to_standard_compliant_dataframe = ( + dataframe_api_compat.pandas_standard.convert_to_standard_compliant_dataframe + ) + return convert_to_standard_compliant_dataframe(self, api_version=api_version) + + def __arrow_c_stream__(self, requested_schema=None): + """ + Export the pandas DataFrame as an Arrow C stream PyCapsule. + + This relies on pyarrow to convert the pandas DataFrame to the Arrow + format (and follows the default behaviour of ``pyarrow.Table.from_pandas`` + in its handling of the index, i.e. store the index as a column except + for RangeIndex). + This conversion is not necessarily zero-copy. + + Parameters + ---------- + requested_schema : PyCapsule, default None + The schema to which the dataframe should be casted, passed as a + PyCapsule containing a C ArrowSchema representation of the + requested schema. + + Returns + ------- + PyCapsule + """ + pa = import_optional_dependency("pyarrow", min_version="14.0.0") + if requested_schema is not None: + requested_schema = pa.Schema._import_from_c_capsule(requested_schema) + table = pa.Table.from_pandas(self, schema=requested_schema) + return table.__arrow_c_stream__() + + # ---------------------------------------------------------------------- + + @property + def axes(self) -> list[Index]: + """ + Return a list representing the axes of the DataFrame. + + It has the row axis labels and column axis labels as the only members. + They are returned in that order. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.axes + [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], + dtype='object')] + """ + return [self.index, self.columns] + + @property + def shape(self) -> tuple[int, int]: + """ + Return a tuple representing the dimensionality of the DataFrame. + + See Also + -------- + ndarray.shape : Tuple of array dimensions. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.shape + (2, 2) + + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], + ... 'col3': [5, 6]}) + >>> df.shape + (2, 3) + """ + return len(self.index), len(self.columns) + + @property + def _is_homogeneous_type(self) -> bool: + """ + Whether all the columns in a DataFrame have the same type. + + Returns + ------- + bool + + Examples + -------- + >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type + True + >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type + False + + Items with the same type but different sizes are considered + different types. + + >>> DataFrame({ + ... "A": np.array([1, 2], dtype=np.int32), + ... 
"B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type + False + """ + # The "<" part of "<=" here is for empty DataFrame cases + return len({arr.dtype for arr in self._mgr.arrays}) <= 1 + + @property + def _can_fast_transpose(self) -> bool: + """ + Can we transpose this DataFrame without creating any new array objects. + """ + if isinstance(self._mgr, ArrayManager): + return False + blocks = self._mgr.blocks + if len(blocks) != 1: + return False + + dtype = blocks[0].dtype + # TODO(EA2D) special case would be unnecessary with 2D EAs + return not is_1d_only_ea_dtype(dtype) + + @property + def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: + """ + Analogue to ._values that may return a 2D ExtensionArray. + """ + mgr = self._mgr + + if isinstance(mgr, ArrayManager): + if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): + # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" + # has no attribute "reshape" + return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] + return ensure_wrapped_if_datetimelike(self.values) + + blocks = mgr.blocks + if len(blocks) != 1: + return ensure_wrapped_if_datetimelike(self.values) + + arr = blocks[0].values + if arr.ndim == 1: + # non-2D ExtensionArray + return self.values + + # more generally, whatever we allow in NDArrayBackedExtensionBlock + arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) + return arr.T + + # ---------------------------------------------------------------------- + # Rendering Methods + + def _repr_fits_vertical_(self) -> bool: + """ + Check length against max_rows. + """ + max_rows = get_option("display.max_rows") + return len(self) <= max_rows + + def _repr_fits_horizontal_(self) -> bool: + """ + Check if full repr fits in horizontal boundaries imposed by the display + options width and max_columns. + """ + width, height = console.get_console_size() + max_columns = get_option("display.max_columns") + nb_columns = len(self.columns) + + # exceed max columns + if (max_columns and nb_columns > max_columns) or ( + width and nb_columns > (width // 2) + ): + return False + + # used by repr_html under IPython notebook or scripts ignore terminal + # dims + if width is None or not console.in_interactive_session(): + return True + + if get_option("display.width") is not None or console.in_ipython_frontend(): + # check at least the column row for excessive width + max_rows = 1 + else: + max_rows = get_option("display.max_rows") + + # when auto-detecting, so width=None and not in ipython front end + # check whether repr fits horizontal by actually checking + # the width of the rendered repr + buf = StringIO() + + # only care about the stuff we'll actually print out + # and to_string on entire frame may be expensive + d = self + + if max_rows is not None: # unlimited rows + # min of two, where one may be None + d = d.iloc[: min(max_rows, len(d))] + else: + return True + + d.to_string(buf=buf) + value = buf.getvalue() + repr_width = max(len(line) for line in value.split("\n")) + + return repr_width < width + + def _info_repr(self) -> bool: + """ + True if the repr should show the info view. + """ + info_repr_option = get_option("display.large_repr") == "info" + return info_repr_option and not ( + self._repr_fits_horizontal_() and self._repr_fits_vertical_() + ) + + def __repr__(self) -> str: + """ + Return a string representation for a particular DataFrame. 
+        """
+        if self._info_repr():
+            buf = StringIO()
+            self.info(buf=buf)
+            return buf.getvalue()
+
+        repr_params = fmt.get_dataframe_repr_params()
+        return self.to_string(**repr_params)
+
+    def _repr_html_(self) -> str | None:
+        """
+        Return a html representation for a particular DataFrame.
+
+        Mainly for IPython notebook.
+        """
+        if self._info_repr():
+            buf = StringIO()
+            self.info(buf=buf)
+            # need to escape the <class>, should be the first line.
+            val = buf.getvalue().replace("<", r"&lt;", 1)
+            val = val.replace(">", r"&gt;", 1)
+            return f"<pre>{val}</pre>"
+
+        if get_option("display.notebook_repr_html"):
+            max_rows = get_option("display.max_rows")
+            min_rows = get_option("display.min_rows")
+            max_cols = get_option("display.max_columns")
+            show_dimensions = get_option("display.show_dimensions")
+
+            formatter = fmt.DataFrameFormatter(
+                self,
+                columns=None,
+                col_space=None,
+                na_rep="NaN",
+                formatters=None,
+                float_format=None,
+                sparsify=None,
+                justify=None,
+                index_names=True,
+                header=True,
+                index=True,
+                bold_rows=True,
+                escape=True,
+                max_rows=max_rows,
+                min_rows=min_rows,
+                max_cols=max_cols,
+                show_dimensions=show_dimensions,
+                decimal=".",
+            )
+            return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
+        else:
+            return None
+
+    @overload
+    def to_string(
+        self,
+        buf: None = ...,
+        columns: Axes | None = ...,
+        col_space: int | list[int] | dict[Hashable, int] | None = ...,
+        header: bool | SequenceNotStr[str] = ...,
+        index: bool = ...,
+        na_rep: str = ...,
+        formatters: fmt.FormattersType | None = ...,
+        float_format: fmt.FloatFormatType | None = ...,
+        sparsify: bool | None = ...,
+        index_names: bool = ...,
+        justify: str | None = ...,
+        max_rows: int | None = ...,
+        max_cols: int | None = ...,
+        show_dimensions: bool = ...,
+        decimal: str = ...,
+        line_width: int | None = ...,
+        min_rows: int | None = ...,
+        max_colwidth: int | None = ...,
+        encoding: str | None = ...,
+    ) -> str:
+        ...
+
+    @overload
+    def to_string(
+        self,
+        buf: FilePath | WriteBuffer[str],
+        columns: Axes | None = ...,
+        col_space: int | list[int] | dict[Hashable, int] | None = ...,
+        header: bool | SequenceNotStr[str] = ...,
+        index: bool = ...,
+        na_rep: str = ...,
+        formatters: fmt.FormattersType | None = ...,
+        float_format: fmt.FloatFormatType | None = ...,
+        sparsify: bool | None = ...,
+        index_names: bool = ...,
+        justify: str | None = ...,
+        max_rows: int | None = ...,
+        max_cols: int | None = ...,
+        show_dimensions: bool = ...,
+        decimal: str = ...,
+        line_width: int | None = ...,
+        min_rows: int | None = ...,
+        max_colwidth: int | None = ...,
+        encoding: str | None = ...,
+    ) -> None:
+        ...
+
+    @deprecate_nonkeyword_arguments(
+        version="3.0", allowed_args=["self", "buf"], name="to_string"
+    )
+    @Substitution(
+        header_type="bool or list of str",
+        header="Write out the column names. If a list of columns "
+        "is given, it is assumed to be aliases for the "
+        "column names",
+        col_space_type="int, list or dict of int",
+        col_space="The minimum width of each column. If a list of ints is given, "
+        "each integer corresponds to one column. If a dict is given, the key "
+        "references the column, while the value defines the space to use.",
+    )
+    @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
+    def to_string(
+        self,
+        buf: FilePath | WriteBuffer[str] | None = None,
+        columns: Axes | None = None,
+        col_space: int | list[int] | dict[Hashable, int] | None = None,
+        header: bool | SequenceNotStr[str] = True,
+        index: bool = True,
+        na_rep: str = "NaN",
+        formatters: fmt.FormattersType | None = None,
+        float_format: fmt.FloatFormatType | None = None,
+        sparsify: bool | None = None,
+        index_names: bool = True,
+        justify: str | None = None,
+        max_rows: int | None = None,
+        max_cols: int | None = None,
+        show_dimensions: bool = False,
+        decimal: str = ".",
+        line_width: int | None = None,
+        min_rows: int | None = None,
+        max_colwidth: int | None = None,
+        encoding: str | None = None,
+    ) -> str | None:
+        """
+        Render a DataFrame to a console-friendly tabular output.
+ %(shared_params)s + line_width : int, optional + Width to wrap a line in characters. + min_rows : int, optional + The number of rows to display in the console in a truncated repr + (when number of rows is above `max_rows`). + max_colwidth : int, optional + Max width to truncate each column in characters. By default, no limit. + encoding : str, default "utf-8" + Set character encoding. + %(returns)s + See Also + -------- + to_html : Convert DataFrame to HTML. + + Examples + -------- + >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} + >>> df = pd.DataFrame(d) + >>> print(df.to_string()) + col1 col2 + 0 1 4 + 1 2 5 + 2 3 6 + """ + from pandas import option_context + + with option_context("display.max_colwidth", max_colwidth): + formatter = fmt.DataFrameFormatter( + self, + columns=columns, + col_space=col_space, + na_rep=na_rep, + formatters=formatters, + float_format=float_format, + sparsify=sparsify, + justify=justify, + index_names=index_names, + header=header, + index=index, + min_rows=min_rows, + max_rows=max_rows, + max_cols=max_cols, + show_dimensions=show_dimensions, + decimal=decimal, + ) + return fmt.DataFrameRenderer(formatter).to_string( + buf=buf, + encoding=encoding, + line_width=line_width, + ) + + def _get_values_for_csv( + self, + *, + float_format: FloatFormatType | None, + date_format: str | None, + decimal: str, + na_rep: str, + quoting, # int csv.QUOTE_FOO from stdlib + ) -> Self: + # helper used by to_csv + mgr = self._mgr.get_values_for_csv( + float_format=float_format, + date_format=date_format, + decimal=decimal, + na_rep=na_rep, + quoting=quoting, + ) + # error: Incompatible return value type (got "DataFrame", expected "Self") + return self._constructor_from_mgr(mgr, axes=mgr.axes) # type: ignore[return-value] + + # ---------------------------------------------------------------------- + + @property + def style(self) -> Styler: + """ + Returns a Styler object. + + Contains methods for building a styled HTML representation of the DataFrame. + + See Also + -------- + io.formats.style.Styler : Helps style a DataFrame or Series according to the + data with HTML and CSS. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2, 3]}) + >>> df.style # doctest: +SKIP + + Please see + `Table Visualization <../../user_guide/style.ipynb>`_ for more examples. + """ + from pandas.io.formats.style import Styler + + return Styler(self) + + _shared_docs[ + "items" + ] = r""" + Iterate over (column name, Series) pairs. + + Iterates over the DataFrame columns, returning a tuple with + the column name and the content as a Series. + + Yields + ------ + label : object + The column names for the DataFrame being iterated over. + content : Series + The column entries belonging to each label, as a Series. + + See Also + -------- + DataFrame.iterrows : Iterate over DataFrame rows as + (index, Series) pairs. + DataFrame.itertuples : Iterate over DataFrame rows as namedtuples + of the values. + + Examples + -------- + >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], + ... 'population': [1864, 22000, 80000]}, + ... index=['panda', 'polar', 'koala']) + >>> df + species population + panda bear 1864 + polar bear 22000 + koala marsupial 80000 + >>> for label, content in df.items(): + ... print(f'label: {label}') + ... print(f'content: {content}', sep='\n') + ... 
+ label: species + content: + panda bear + polar bear + koala marsupial + Name: species, dtype: object + label: population + content: + panda 1864 + polar 22000 + koala 80000 + Name: population, dtype: int64 + """ + + @Appender(_shared_docs["items"]) + def items(self) -> Iterable[tuple[Hashable, Series]]: + if self.columns.is_unique and hasattr(self, "_item_cache"): + for k in self.columns: + yield k, self._get_item_cache(k) + else: + for i, k in enumerate(self.columns): + yield k, self._ixs(i, axis=1) + + def iterrows(self) -> Iterable[tuple[Hashable, Series]]: + """ + Iterate over DataFrame rows as (index, Series) pairs. + + Yields + ------ + index : label or tuple of label + The index of the row. A tuple for a `MultiIndex`. + data : Series + The data of the row as a Series. + + See Also + -------- + DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. + DataFrame.items : Iterate over (column name, Series) pairs. + + Notes + ----- + 1. Because ``iterrows`` returns a Series for each row, + it does **not** preserve dtypes across the rows (dtypes are + preserved across columns for DataFrames). + + To preserve dtypes while iterating over the rows, it is better + to use :meth:`itertuples` which returns namedtuples of the values + and which is generally faster than ``iterrows``. + + 2. You should **never modify** something you are iterating over. + This is not guaranteed to work in all cases. Depending on the + data types, the iterator returns a copy and not a view, and writing + to it will have no effect. + + Examples + -------- + + >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) + >>> row = next(df.iterrows())[1] + >>> row + int 1.0 + float 1.5 + Name: 0, dtype: float64 + >>> print(row['int'].dtype) + float64 + >>> print(df['int'].dtype) + int64 + """ + columns = self.columns + klass = self._constructor_sliced + using_cow = using_copy_on_write() + for k, v in zip(self.index, self.values): + s = klass(v, index=columns, name=k).__finalize__(self) + if using_cow and self._mgr.is_single_block: + s._mgr.add_references(self._mgr) # type: ignore[arg-type] + yield k, s + + def itertuples( + self, index: bool = True, name: str | None = "Pandas" + ) -> Iterable[tuple[Any, ...]]: + """ + Iterate over DataFrame rows as namedtuples. + + Parameters + ---------- + index : bool, default True + If True, return the index as the first element of the tuple. + name : str or None, default "Pandas" + The name of the returned namedtuples or None to return regular + tuples. + + Returns + ------- + iterator + An object to iterate over namedtuples for each row in the + DataFrame with the first field possibly being the index and + following fields being the column values. + + See Also + -------- + DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) + pairs. + DataFrame.items : Iterate over (column name, Series) pairs. + + Notes + ----- + The column names will be renamed to positional names if they are + invalid Python identifiers, repeated, or start with an underscore. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, + ... index=['dog', 'hawk']) + >>> df + num_legs num_wings + dog 4 0 + hawk 2 2 + >>> for row in df.itertuples(): + ... print(row) + ... + Pandas(Index='dog', num_legs=4, num_wings=0) + Pandas(Index='hawk', num_legs=2, num_wings=2) + + By setting the `index` parameter to False we can remove the index + as the first element of the tuple: + + >>> for row in df.itertuples(index=False): + ... print(row) + ... 
+ Pandas(num_legs=4, num_wings=0) + Pandas(num_legs=2, num_wings=2) + + With the `name` parameter set we set a custom name for the yielded + namedtuples: + + >>> for row in df.itertuples(name='Animal'): + ... print(row) + ... + Animal(Index='dog', num_legs=4, num_wings=0) + Animal(Index='hawk', num_legs=2, num_wings=2) + """ + arrays = [] + fields = list(self.columns) + if index: + arrays.append(self.index) + fields.insert(0, "Index") + + # use integer indexing because of possible duplicate column names + arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) + + if name is not None: + # https://github.com/python/mypy/issues/9046 + # error: namedtuple() expects a string literal as the first argument + itertuple = collections.namedtuple( # type: ignore[misc] + name, fields, rename=True + ) + return map(itertuple._make, zip(*arrays)) + + # fallback to regular tuples + return zip(*arrays) + + def __len__(self) -> int: + """ + Returns length of info axis, but here we use the index. + """ + return len(self.index) + + @overload + def dot(self, other: Series) -> Series: + ... + + @overload + def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: + ... + + def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: + """ + Compute the matrix multiplication between the DataFrame and other. + + This method computes the matrix product between the DataFrame and the + values of an other Series, DataFrame or a numpy array. + + It can also be called using ``self @ other``. + + Parameters + ---------- + other : Series, DataFrame or array-like + The other object to compute the matrix product with. + + Returns + ------- + Series or DataFrame + If other is a Series, return the matrix product between self and + other as a Series. If other is a DataFrame or a numpy.array, return + the matrix product of self and other in a DataFrame of a np.array. + + See Also + -------- + Series.dot: Similar method for Series. + + Notes + ----- + The dimensions of DataFrame and other must be compatible in order to + compute the matrix multiplication. In addition, the column names of + DataFrame and the index of other must contain the same values, as they + will be aligned prior to the multiplication. + + The dot method for Series computes the inner product, instead of the + matrix product here. + + Examples + -------- + Here we multiply a DataFrame with a Series. + + >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) + >>> s = pd.Series([1, 1, 2, 1]) + >>> df.dot(s) + 0 -4 + 1 5 + dtype: int64 + + Here we multiply a DataFrame with another DataFrame. + + >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) + >>> df.dot(other) + 0 1 + 0 1 4 + 1 2 2 + + Note that the dot method give the same result as @ + + >>> df @ other + 0 1 + 0 1 4 + 1 2 2 + + The dot method works also if other is an np.array. + + >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) + >>> df.dot(arr) + 0 1 + 0 1 4 + 1 2 2 + + Note how shuffling of the objects does not change the result. 
+ + >>> s2 = s.reindex([1, 0, 2, 3]) + >>> df.dot(s2) + 0 -4 + 1 5 + dtype: int64 + """ + if isinstance(other, (Series, DataFrame)): + common = self.columns.union(other.index) + if len(common) > len(self.columns) or len(common) > len(other.index): + raise ValueError("matrices are not aligned") + + left = self.reindex(columns=common, copy=False) + right = other.reindex(index=common, copy=False) + lvals = left.values + rvals = right._values + else: + left = self + lvals = self.values + rvals = np.asarray(other) + if lvals.shape[1] != rvals.shape[0]: + raise ValueError( + f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" + ) + + if isinstance(other, DataFrame): + common_type = find_common_type(list(self.dtypes) + list(other.dtypes)) + return self._constructor( + np.dot(lvals, rvals), + index=left.index, + columns=other.columns, + copy=False, + dtype=common_type, + ) + elif isinstance(other, Series): + common_type = find_common_type(list(self.dtypes) + [other.dtypes]) + return self._constructor_sliced( + np.dot(lvals, rvals), index=left.index, copy=False, dtype=common_type + ) + elif isinstance(rvals, (np.ndarray, Index)): + result = np.dot(lvals, rvals) + if result.ndim == 2: + return self._constructor(result, index=left.index, copy=False) + else: + return self._constructor_sliced(result, index=left.index, copy=False) + else: # pragma: no cover + raise TypeError(f"unsupported type: {type(other)}") + + @overload + def __matmul__(self, other: Series) -> Series: + ... + + @overload + def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: + ... + + def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: + """ + Matrix multiplication using binary `@` operator. + """ + return self.dot(other) + + def __rmatmul__(self, other) -> DataFrame: + """ + Matrix multiplication using binary `@` operator. + """ + try: + return self.T.dot(np.transpose(other)).T + except ValueError as err: + if "shape mismatch" not in str(err): + raise + # GH#21581 give exception message for original shapes + msg = f"shapes {np.shape(other)} and {self.shape} not aligned" + raise ValueError(msg) from err + + # ---------------------------------------------------------------------- + # IO methods (to / from other formats) + + @classmethod + def from_dict( + cls, + data: dict, + orient: FromDictOrient = "columns", + dtype: Dtype | None = None, + columns: Axes | None = None, + ) -> DataFrame: + """ + Construct DataFrame from dict of array-like or dicts. + + Creates DataFrame object from dictionary by columns or by index + allowing dtype specification. + + Parameters + ---------- + data : dict + Of the form {field : array-like} or {field : dict}. + orient : {'columns', 'index', 'tight'}, default 'columns' + The "orientation" of the data. If the keys of the passed dict + should be the columns of the resulting DataFrame, pass 'columns' + (default). Otherwise if the keys should be rows, pass 'index'. + If 'tight', assume a dict with keys ['index', 'columns', 'data', + 'index_names', 'column_names']. + + .. versionadded:: 1.4.0 + 'tight' as an allowed value for the ``orient`` argument + + dtype : dtype, default None + Data type to force after DataFrame construction, otherwise infer. + columns : list, default None + Column labels to use when ``orient='index'``. Raises a ValueError + if used with ``orient='columns'`` or ``orient='tight'``. 
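+            For example, ``pd.DataFrame.from_dict(data, orient='columns',
+            columns=['A'])`` raises ``ValueError: cannot use columns parameter
+            with orient='columns'``.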
+ + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.from_records : DataFrame from structured ndarray, sequence + of tuples or dicts, or DataFrame. + DataFrame : DataFrame object creation using constructor. + DataFrame.to_dict : Convert the DataFrame to a dictionary. + + Examples + -------- + By default the keys of the dict become the DataFrame columns: + + >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} + >>> pd.DataFrame.from_dict(data) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + + Specify ``orient='index'`` to create the DataFrame using dictionary + keys as rows: + + >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} + >>> pd.DataFrame.from_dict(data, orient='index') + 0 1 2 3 + row_1 3 2 1 0 + row_2 a b c d + + When using the 'index' orientation, the column names can be + specified manually: + + >>> pd.DataFrame.from_dict(data, orient='index', + ... columns=['A', 'B', 'C', 'D']) + A B C D + row_1 3 2 1 0 + row_2 a b c d + + Specify ``orient='tight'`` to create the DataFrame using a 'tight' + format: + + >>> data = {'index': [('a', 'b'), ('a', 'c')], + ... 'columns': [('x', 1), ('y', 2)], + ... 'data': [[1, 3], [2, 4]], + ... 'index_names': ['n1', 'n2'], + ... 'column_names': ['z1', 'z2']} + >>> pd.DataFrame.from_dict(data, orient='tight') + z1 x y + z2 1 2 + n1 n2 + a b 1 3 + c 2 4 + """ + index = None + orient = orient.lower() # type: ignore[assignment] + if orient == "index": + if len(data) > 0: + # TODO speed up Series case + if isinstance(next(iter(data.values())), (Series, dict)): + data = _from_nested_dict(data) + else: + index = list(data.keys()) + # error: Incompatible types in assignment (expression has type + # "List[Any]", variable has type "Dict[Any, Any]") + data = list(data.values()) # type: ignore[assignment] + elif orient in ("columns", "tight"): + if columns is not None: + raise ValueError(f"cannot use columns parameter with orient='{orient}'") + else: # pragma: no cover + raise ValueError( + f"Expected 'index', 'columns' or 'tight' for orient parameter. " + f"Got '{orient}' instead" + ) + + if orient != "tight": + return cls(data, index=index, columns=columns, dtype=dtype) + else: + realdata = data["data"] + + def create_index(indexlist, namelist): + index: Index + if len(namelist) > 1: + index = MultiIndex.from_tuples(indexlist, names=namelist) + else: + index = Index(indexlist, name=namelist[0]) + return index + + index = create_index(data["index"], data["index_names"]) + columns = create_index(data["columns"], data["column_names"]) + return cls(realdata, index=index, columns=columns, dtype=dtype) + + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Convert the DataFrame to a NumPy array. + + By default, the dtype of the returned array will be the common NumPy + dtype of all types in the DataFrame. For example, if the dtypes are + ``float16`` and ``float32``, the results dtype will be ``float32``. + This may require copying data and coercing values, which may be + expensive. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to pass to :meth:`numpy.asarray`. + copy : bool, default False + Whether to ensure that the returned value is not a view on + another array. Note that ``copy=False`` does not *ensure* that + ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that + a copy is made, even if not strictly necessary. + na_value : Any, optional + The value to use for missing values. 
The default value depends + on `dtype` and the dtypes of the DataFrame columns. + + Returns + ------- + numpy.ndarray + + See Also + -------- + Series.to_numpy : Similar method for Series. + + Examples + -------- + >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() + array([[1, 3], + [2, 4]]) + + With heterogeneous data, the lowest common type will have to + be used. + + >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) + >>> df.to_numpy() + array([[1. , 3. ], + [2. , 4.5]]) + + For a mix of numeric and non-numeric types, the output array will + have object dtype. + + >>> df['C'] = pd.date_range('2000', periods=2) + >>> df.to_numpy() + array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], + [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) + """ + if dtype is not None: + dtype = np.dtype(dtype) + result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) + if result.dtype is not dtype: + result = np.asarray(result, dtype=dtype) + + return result + + def _create_data_for_split_and_tight_to_dict( + self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] + ) -> list: + """ + Simple helper method to create data for to ``to_dict(orient="split")`` and + ``to_dict(orient="tight")`` to create the main output data + """ + if are_all_object_dtype_cols: + data = [ + list(map(maybe_box_native, t)) + for t in self.itertuples(index=False, name=None) + ] + else: + data = [list(t) for t in self.itertuples(index=False, name=None)] + if object_dtype_indices: + # If we have object_dtype_cols, apply maybe_box_naive after list + # comprehension for perf + for row in data: + for i in object_dtype_indices: + row[i] = maybe_box_native(row[i]) + return data + + @overload + def to_dict( + self, + orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., + *, + into: type[MutableMappingT] | MutableMappingT, + index: bool = ..., + ) -> MutableMappingT: + ... + + @overload + def to_dict( + self, + orient: Literal["records"], + *, + into: type[MutableMappingT] | MutableMappingT, + index: bool = ..., + ) -> list[MutableMappingT]: + ... + + @overload + def to_dict( + self, + orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., + *, + into: type[dict] = ..., + index: bool = ..., + ) -> dict: + ... + + @overload + def to_dict( + self, + orient: Literal["records"], + *, + into: type[dict] = ..., + index: bool = ..., + ) -> list[dict]: + ... + + # error: Incompatible default for argument "into" (default has type "type + # [dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self", "orient"], name="to_dict" + ) + def to_dict( + self, + orient: Literal[ + "dict", "list", "series", "split", "tight", "records", "index" + ] = "dict", + into: type[MutableMappingT] + | MutableMappingT = dict, # type: ignore[assignment] + index: bool = True, + ) -> MutableMappingT | list[MutableMappingT]: + """ + Convert the DataFrame to a dictionary. + + The type of the key-value pairs can be customized with the parameters + (see below). + + Parameters + ---------- + orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} + Determines the type of the values of the dictionary. 
+
+            - 'dict' (default) : dict like {column -> {index -> value}}
+            - 'list' : dict like {column -> [values]}
+            - 'series' : dict like {column -> Series(values)}
+            - 'split' : dict like
+              {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
+            - 'tight' : dict like
+              {'index' -> [index], 'columns' -> [columns], 'data' -> [values],
+              'index_names' -> [index.names], 'column_names' -> [column.names]}
+            - 'records' : list like
+              [{column -> value}, ... , {column -> value}]
+            - 'index' : dict like {index -> {column -> value}}
+
+            .. versionadded:: 1.4.0
+                'tight' as an allowed value for the ``orient`` argument
+
+        into : class, default dict
+            The collections.abc.MutableMapping subclass used for all Mappings
+            in the return value. Can be the actual class or an empty
+            instance of the mapping type you want. If you want a
+            collections.defaultdict, you must pass it initialized.
+
+        index : bool, default True
+            Whether to include the index item (and index_names item if `orient`
+            is 'tight') in the returned dictionary. Can only be ``False``
+            when `orient` is 'split' or 'tight'.
+
+            .. versionadded:: 2.0.0
+
+        Returns
+        -------
+        dict, list or collections.abc.MutableMapping
+            Return a collections.abc.MutableMapping object representing the
+            DataFrame. The resulting transformation depends on the `orient`
+            parameter.
+
+        See Also
+        --------
+        DataFrame.from_dict: Create a DataFrame from a dictionary.
+        DataFrame.to_json: Convert a DataFrame to JSON format.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'col1': [1, 2],
+        ...                    'col2': [0.5, 0.75]},
+        ...                   index=['row1', 'row2'])
+        >>> df
+              col1  col2
+        row1     1  0.50
+        row2     2  0.75
+        >>> df.to_dict()
+        {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
+
+        You can specify the return orientation.
+
+        >>> df.to_dict('series')
+        {'col1': row1    1
+                 row2    2
+        Name: col1, dtype: int64,
+        'col2': row1    0.50
+        row2    0.75
+        Name: col2, dtype: float64}
+
+        >>> df.to_dict('split')
+        {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
+         'data': [[1, 0.5], [2, 0.75]]}
+
+        >>> df.to_dict('records')
+        [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
+
+        >>> df.to_dict('index')
+        {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
+
+        >>> df.to_dict('tight')
+        {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
+         'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]}
+
+        You can also specify the mapping type.
+
+        >>> from collections import OrderedDict, defaultdict
+        >>> df.to_dict(into=OrderedDict)
+        OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
+                     ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
+
+        If you want a `defaultdict`, you need to initialize it:
+
+        >>> dd = defaultdict(list)
+        >>> df.to_dict('records', into=dd)
+        [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
+         defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
+        """
+        from pandas.core.methods.to_dict import to_dict
+
+        return to_dict(self, orient, into=into, index=index)
+
+    @deprecate_nonkeyword_arguments(
+        version="3.0", allowed_args=["self", "destination_table"], name="to_gbq"
+    )
+    def to_gbq(
+        self,
+        destination_table: str,
+        project_id: str | None = None,
+        chunksize: int | None = None,
+        reauth: bool = False,
+        if_exists: ToGbqIfexist = "fail",
+        auth_local_webserver: bool = True,
+        table_schema: list[dict[str, str]] | None = None,
+        location: str | None = None,
+        progress_bar: bool = True,
+        credentials=None,
+    ) -> None:
+        """
+        Write a DataFrame to a Google BigQuery table.
+
+        .. deprecated:: 2.2.0
+
+           Please use ``pandas_gbq.to_gbq`` instead.
+
+        This function requires the `pandas-gbq package
+        <https://pandas-gbq.readthedocs.io>`__.
+
+        See the `How to authenticate with Google BigQuery
+        <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
+        guide for authentication instructions.
+
+        Parameters
+        ----------
+        destination_table : str
+            Name of table to be written, in the form ``dataset.tablename``.
+        project_id : str, optional
+            Google BigQuery Account project ID. Optional when available from
+            the environment.
+        chunksize : int, optional
+            Number of rows to be inserted in each chunk from the dataframe.
+            Set to ``None`` to load the whole dataframe at once.
+        reauth : bool, default False
+            Force Google BigQuery to re-authenticate the user. This is useful
+            if multiple accounts are used.
+        if_exists : str, default 'fail'
+            Behavior when the destination table exists. Value can be one of:
+
+            ``'fail'``
+                If table exists raise pandas_gbq.gbq.TableCreationError.
+            ``'replace'``
+                If table exists, drop it, recreate it, and insert data.
+            ``'append'``
+                If table exists, insert data. Create if does not exist.
+        auth_local_webserver : bool, default True
+            Use the `local webserver flow`_ instead of the `console flow`_
+            when getting user credentials.
+
+            .. _local webserver flow:
+                https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
+            .. _console flow:
+                https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
+
+            *New in version 0.2.0 of pandas-gbq*.
+
+            .. versionchanged:: 1.5.0
+                Default value is changed to ``True``. Google has deprecated the
+                ``auth_local_webserver = False`` `"out of band" (copy-paste)
+                flow
+                <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob>`_.
+        table_schema : list of dicts, optional
+            List of BigQuery table fields to which the DataFrame columns
+            conform, e.g. ``[{'name': 'col1', 'type':
+            'STRING'},...]``. If schema is not provided, it will be
+            generated according to dtypes of DataFrame columns. See
+            BigQuery API documentation on available names of a field.
+
+            *New in version 0.3.1 of pandas-gbq*.
+        location : str, optional
+            Location where the load job should run. See the `BigQuery locations
+            documentation
+            <https://cloud.google.com/bigquery/docs/locations>`__ for a
+            list of available locations. The location must match that of the
+            target dataset.
+
+            *New in version 0.5.0 of pandas-gbq*.
+        progress_bar : bool, default True
+            Use the library `tqdm` to show the progress bar for the upload,
+            chunk by chunk.
+
+            *New in version 0.5.0 of pandas-gbq*.
+        credentials : google.auth.credentials.Credentials, optional
+            Credentials for accessing Google APIs. Use this parameter to
+            override default credentials, such as to use Compute Engine
+            :class:`google.auth.compute_engine.Credentials` or Service
+            Account :class:`google.oauth2.service_account.Credentials`
+            directly.
+
+            *New in version 0.8.0 of pandas-gbq*.
+
+        See Also
+        --------
+        pandas_gbq.to_gbq : This function in the pandas-gbq library.
+        read_gbq : Read a DataFrame from Google BigQuery.
+
+        Examples
+        --------
+        Example taken from `Google BigQuery documentation
+        <https://cloud.google.com/bigquery/docs/samples/bigquery-pandas-gbq-to-gbq-simple>`_
+
+        >>> project_id = "my-project"
+        >>> table_id = 'my_dataset.my_table'
+        >>> df = pd.DataFrame({
+        ...     "my_string": ["a", "b", "c"],
+        ...     "my_int64": [1, 2, 3],
+        ...     "my_float64": [4.0, 5.0, 6.0],
+        ...     "my_bool1": [True, False, True],
+        ...     "my_bool2": [False, True, False],
+        ...     "my_dates": pd.date_range("now", periods=3),
+        ... }
+        ...
) + + >>> df.to_gbq(table_id, project_id=project_id) # doctest: +SKIP + """ + from pandas.io import gbq + + gbq.to_gbq( + self, + destination_table, + project_id=project_id, + chunksize=chunksize, + reauth=reauth, + if_exists=if_exists, + auth_local_webserver=auth_local_webserver, + table_schema=table_schema, + location=location, + progress_bar=progress_bar, + credentials=credentials, + ) + + @classmethod + def from_records( + cls, + data, + index=None, + exclude=None, + columns=None, + coerce_float: bool = False, + nrows: int | None = None, + ) -> DataFrame: + """ + Convert structured or record ndarray to DataFrame. + + Creates a DataFrame object from a structured ndarray, sequence of + tuples or dicts, or DataFrame. + + Parameters + ---------- + data : structured ndarray, sequence of tuples or dicts, or DataFrame + Structured input data. + + .. deprecated:: 2.1.0 + Passing a DataFrame is deprecated. + index : str, list of fields, array-like + Field of array to use as the index, alternately a specific set of + input labels to use. + exclude : sequence, default None + Columns or fields to exclude. + columns : sequence, default None + Column names to use. If the passed data do not have names + associated with them, this argument provides names for the + columns. Otherwise this argument indicates the order of the columns + in the result (any names not found in the data will become all-NA + columns). + coerce_float : bool, default False + Attempt to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point, useful for SQL result sets. + nrows : int, default None + Number of rows to read if data is an iterator. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.from_dict : DataFrame from dict of array-like or dicts. + DataFrame : DataFrame object creation using constructor. + + Examples + -------- + Data can be provided as a structured ndarray: + + >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], + ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) + >>> pd.DataFrame.from_records(data) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + + Data can be provided as a list of dicts: + + >>> data = [{'col_1': 3, 'col_2': 'a'}, + ... {'col_1': 2, 'col_2': 'b'}, + ... {'col_1': 1, 'col_2': 'c'}, + ... {'col_1': 0, 'col_2': 'd'}] + >>> pd.DataFrame.from_records(data) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + + Data can be provided as a list of tuples with corresponding columns: + + >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] + >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + """ + if isinstance(data, DataFrame): + warnings.warn( + "Passing a DataFrame to DataFrame.from_records is deprecated. Use " + "set_index and/or drop to modify the DataFrame instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if columns is not None: + if is_scalar(columns): + columns = [columns] + data = data[columns] + if index is not None: + data = data.set_index(index) + if exclude is not None: + data = data.drop(columns=exclude) + return data.copy(deep=False) + + result_index = None + + # Make a copy of the input columns so we can modify it + if columns is not None: + columns = ensure_index(columns) + + def maybe_reorder( + arrays: list[ArrayLike], arr_columns: Index, columns: Index, index + ) -> tuple[list[ArrayLike], Index, Index | None]: + """ + If our desired 'columns' do not match the data's pre-existing 'arr_columns', + we re-order our arrays. 
This is like a pre-emptive (cheap) reindex. + """ + if len(arrays): + length = len(arrays[0]) + else: + length = 0 + + result_index = None + if len(arrays) == 0 and index is None and length == 0: + result_index = default_index(0) + + arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) + return arrays, arr_columns, result_index + + if is_iterator(data): + if nrows == 0: + return cls() + + try: + first_row = next(data) + except StopIteration: + return cls(index=index, columns=columns) + + dtype = None + if hasattr(first_row, "dtype") and first_row.dtype.names: + dtype = first_row.dtype + + values = [first_row] + + if nrows is None: + values += data + else: + values.extend(itertools.islice(data, nrows - 1)) + + if dtype is not None: + data = np.array(values, dtype=dtype) + else: + data = values + + if isinstance(data, dict): + if columns is None: + columns = arr_columns = ensure_index(sorted(data)) + arrays = [data[k] for k in columns] + else: + arrays = [] + arr_columns_list = [] + for k, v in data.items(): + if k in columns: + arr_columns_list.append(k) + arrays.append(v) + + arr_columns = Index(arr_columns_list) + arrays, arr_columns, result_index = maybe_reorder( + arrays, arr_columns, columns, index + ) + + elif isinstance(data, np.ndarray): + arrays, columns = to_arrays(data, columns) + arr_columns = columns + else: + arrays, arr_columns = to_arrays(data, columns) + if coerce_float: + for i, arr in enumerate(arrays): + if arr.dtype == object: + # error: Argument 1 to "maybe_convert_objects" has + # incompatible type "Union[ExtensionArray, ndarray]"; + # expected "ndarray" + arrays[i] = lib.maybe_convert_objects( + arr, # type: ignore[arg-type] + try_float=True, + ) + + arr_columns = ensure_index(arr_columns) + if columns is None: + columns = arr_columns + else: + arrays, arr_columns, result_index = maybe_reorder( + arrays, arr_columns, columns, index + ) + + if exclude is None: + exclude = set() + else: + exclude = set(exclude) + + if index is not None: + if isinstance(index, str) or not hasattr(index, "__iter__"): + i = columns.get_loc(index) + exclude.add(index) + if len(arrays) > 0: + result_index = Index(arrays[i], name=index) + else: + result_index = Index([], name=index) + else: + try: + index_data = [arrays[arr_columns.get_loc(field)] for field in index] + except (KeyError, TypeError): + # raised by get_loc, see GH#29258 + result_index = index + else: + result_index = ensure_index_from_sequences(index_data, names=index) + exclude.update(index) + + if any(exclude): + arr_exclude = [x for x in exclude if x in arr_columns] + to_remove = [arr_columns.get_loc(col) for col in arr_exclude] + arrays = [v for i, v in enumerate(arrays) if i not in to_remove] + + columns = columns.drop(exclude) + + manager = _get_option("mode.data_manager", silent=True) + mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) + + return cls._from_mgr(mgr, axes=mgr.axes) + + def to_records( + self, index: bool = True, column_dtypes=None, index_dtypes=None + ) -> np.rec.recarray: + """ + Convert DataFrame to a NumPy record array. + + Index will be included as the first field of the record array if + requested. + + Parameters + ---------- + index : bool, default True + Include index in resulting record array, stored in 'index' + field or using the index label, if set. + column_dtypes : str, type, dict, default None + If a string or type, the data type to store all columns. If + a dictionary, a mapping of column names and indices (zero-indexed) + to specific data types. 
+ index_dtypes : str, type, dict, default None + If a string or type, the data type to store all index levels. If + a dictionary, a mapping of index level names and indices + (zero-indexed) to specific data types. + + This mapping is applied only if `index=True`. + + Returns + ------- + numpy.rec.recarray + NumPy ndarray with the DataFrame labels as fields and each row + of the DataFrame as entries. + + See Also + -------- + DataFrame.from_records: Convert structured or record ndarray + to DataFrame. + numpy.rec.recarray: An ndarray that allows field access using + attributes, analogous to typed columns in a + spreadsheet. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, + ... index=['a', 'b']) + >>> df + A B + a 1 0.50 + b 2 0.75 + >>> df.to_records() + rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], + dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) + + If the DataFrame index has no label then the recarray field name + is set to 'index'. If the index has a label then this is used as the + field name: + + >>> df.index = df.index.rename("I") + >>> df.to_records() + rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], + dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) + + The index can be excluded from the record array: + + >>> df.to_records(index=False) + rec.array([(1, 0.5 ), (2, 0.75)], + dtype=[('A', '<i8'), ('B', '<f8')]) + + Data types can be specified for the columns: + + >>> df.to_records(column_dtypes={"A": "int32"}) + rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], + dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) + + As well as for the index: + + >>> df.to_records(index_dtypes="<S2") + rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], + dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) + + >>> index_dtypes = f"<S{df.index.str.len().max()}" + >>> df.to_records(index_dtypes=index_dtypes) + rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], + dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) + """ + if index: + ix_vals = [ + np.asarray(self.index.get_level_values(i)) + for i in range(self.index.nlevels) + ] + + arrays = ix_vals + [ + np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) + ] + + index_names = list(self.index.names) + + if isinstance(self.index, MultiIndex): + index_names = com.fill_missing_names(index_names) + elif index_names[0] is None: + index_names = ["index"] + + names = [str(name) for name in itertools.chain(index_names, self.columns)] + else: + arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] + names = [str(c) for c in self.columns] + index_names = [] + + index_len = len(index_names) + formats = [] + + for i, v in enumerate(arrays): + index_int = i + + # When the names and arrays are collected, we first collect those + # in the DataFrame's index, followed by those in its columns. This + # check tells us whether we are handling an index or column array. + if index_int < index_len: + dtype_mapping = index_dtypes + name = index_names[index_int] + else: + index_int -= index_len + dtype_mapping = column_dtypes + name = self.columns[index_int] + + # If we have a dictionary, look up the data type associated with + # the index level or column, by name or by position. + if is_dict_like(dtype_mapping): + if name in dtype_mapping: + dtype_mapping = dtype_mapping[name] + elif index_int in dtype_mapping: + dtype_mapping = dtype_mapping[index_int] + else: + dtype_mapping = None + + # If no mapping can be found, use the array's dtype attribute for + # formatting. A valid dtype must either be a type or a string + # naming a type. + if dtype_mapping is None: + formats.append(v.dtype) + elif isinstance(dtype_mapping, (type, np.dtype, str)): + formats.append(dtype_mapping) # type: ignore[arg-type] + else: + element = "row" if i < index_len else "column" + msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" + raise ValueError(msg) + + return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) + + @classmethod + def _from_arrays( + cls, + arrays, + columns, + index, + dtype: Dtype | None = None, + verify_integrity: bool = True, + ) -> Self: + """ + Create DataFrame from a list of arrays corresponding to the columns. + + Parameters + ---------- + arrays : list-like of arrays + Each array in the list corresponds to one column, in order. + columns : list-like, Index + The column names for the resulting DataFrame. + index : list-like, Index + The rows labels for the resulting DataFrame. + dtype : dtype, optional + Optional dtype to enforce for all arrays. + verify_integrity : bool, default True + Validate and homogenize all input. If set to False, it is assumed + that all elements of `arrays` are actual arrays, in the form in + which they will be stored in a block (numpy ndarray or + ExtensionArray), have the same length as and are aligned with the + index, and that `columns` and `index` are ensured to be an Index + object. + + Returns + ------- + DataFrame + """ + if dtype is not None: + dtype = pandas_dtype(dtype) + + manager = _get_option("mode.data_manager", silent=True) + columns = ensure_index(columns) + if len(columns) != len(arrays): + raise ValueError("len(columns) must match len(arrays)") + mgr = arrays_to_mgr( + arrays, + columns, + index, + dtype=dtype, + verify_integrity=verify_integrity, + typ=manager, + ) + return cls._from_mgr(mgr, axes=mgr.axes) + + @doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "path", + ) + def to_stata( + self, + path: FilePath | WriteBuffer[bytes], + *, + convert_dates: dict[Hashable, str] | None = None, + write_index: bool = True, + byteorder: ToStataByteorder | None = None, + time_stamp: datetime.datetime | None = None, + data_label: str | None = None, + variable_labels: dict[Hashable, str] | None = None, + version: int | None = 114, + convert_strl: Sequence[Hashable] | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + value_labels: dict[Hashable, dict[float, str]] | None = None, + ) -> None: + """ + Export DataFrame object to Stata dta format. + + Writes the DataFrame to a Stata dataset file. + "dta" files contain a Stata dataset.
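A minimal round-trip sketch for the ``to_records``/``from_records`` pair documented above (nothing beyond behavior shown in their docstrings is assumed):

import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [0.5, 0.75]},
                  index=pd.Index(["a", "b"], name="I"))
rec = df.to_records(column_dtypes={"A": "int32"})  # named fields: I, A, B
assert rec.dtype.names == ("I", "A", "B")
back = pd.DataFrame.from_records(rec, index="I")   # restore the index field
assert back.equals(df.astype({"A": "int32"}))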
+ + Parameters + ---------- + path : str, path object, or buffer + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. + + convert_dates : dict + Dictionary mapping columns containing datetime types to the Stata + internal format to use when writing the dates. Options are 'tc', + 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer + or a name. Datetime columns that do not have a conversion type + specified will be converted to 'tc'. Raises NotImplementedError if + a datetime column has timezone information. + write_index : bool + Write the index to Stata dataset. + byteorder : str + Can be ">", "<", "little", or "big". Default is `sys.byteorder`. + time_stamp : datetime + A datetime to use as file creation date. Default is the current + time. + data_label : str, optional + A label for the data set. Must be 80 characters or smaller. + variable_labels : dict + Dictionary containing columns as keys and variable labels as + values. Each label must be 80 characters or smaller. + version : {{114, 117, 118, 119, None}}, default 114 + Version to use in the output dta file. Set to None to let pandas + decide between 118 or 119 formats depending on the number of + columns in the frame. Version 114 can be read by Stata 10 and + later. Version 117 can be read by Stata 13 or later. Version 118 + is supported in Stata 14 and later. Version 119 is supported in + Stata 15 and later. Version 114 limits string variables to 244 + characters or fewer while versions 117 and later allow strings + with lengths up to 2,000,000 characters. Versions 118 and 119 + support Unicode characters, and version 119 supports more than + 32,767 variables. + + Version 119 should usually only be used when the number of + variables exceeds the capacity of dta format 118. Exporting + smaller datasets in format 119 may have unintended consequences, + and, as of November 2020, Stata SE cannot read version 119 files. + + convert_strl : list, optional + List of column names to convert to the Stata StrL + format. Only available if version is 117. Storing strings in the + StrL format can produce smaller dta files if strings have more than + 8 characters and values are repeated. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + {storage_options} + + value_labels : dict of dicts + Dictionary containing columns as keys and dictionaries of column value + to labels as values. Labels for a single variable must be 32,000 + characters or smaller. + + .. versionadded:: 1.4.0 + + Raises + ------ + NotImplementedError + * If datetimes contain timezone information + * Column dtype is not representable in Stata + ValueError + * Columns listed in convert_dates are neither datetime64[ns] + nor datetime.datetime + * Column listed in convert_dates is not in DataFrame + * Categorical label contains more than 32,000 characters + + See Also + -------- + read_stata : Import Stata data files. + io.stata.StataWriter : Low-level writer for Stata data files. + io.stata.StataWriter117 : Low-level writer for version 117 files.
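A short usage sketch for the parameters above (the file name is illustrative and requires a writable path):

import pandas as pd

df = pd.DataFrame(
    {"event": ["a", "b"], "when": pd.to_datetime(["2020-01-01", "2020-06-01"])}
)
# 'td' stores the datetime column as a Stata daily date; version 118 is the
# Unicode-capable format readable by Stata 14 and later.
df.to_stata("events.dta", version=118, convert_dates={"when": "td"})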
Examples + -------- + >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', + ... 'parrot'], + ... 'speed': [350, 18, 361, 15]}}) + >>> df.to_stata('animals.dta') # doctest: +SKIP + """ + if version not in (114, 117, 118, 119, None): + raise ValueError("Only formats 114, 117, 118 and 119 are supported.") + if version == 114: + if convert_strl is not None: + raise ValueError("strl is not supported in format 114") + from pandas.io.stata import StataWriter as statawriter + elif version == 117: + # Incompatible import of "statawriter" (imported name has type + # "Type[StataWriter117]", local name has type "Type[StataWriter]") + from pandas.io.stata import ( # type: ignore[assignment] + StataWriter117 as statawriter, + ) + else: # versions 118 and 119 + # Incompatible import of "statawriter" (imported name has type + # "Type[StataWriter117]", local name has type "Type[StataWriter]") + from pandas.io.stata import ( # type: ignore[assignment] + StataWriterUTF8 as statawriter, + ) + + kwargs: dict[str, Any] = {} + if version is None or version >= 117: + # strl conversion is only supported >= 117 + kwargs["convert_strl"] = convert_strl + if version is None or version >= 118: + # Specifying the version is only supported for UTF8 (118 or 119) + kwargs["version"] = version + + writer = statawriter( + path, + self, + convert_dates=convert_dates, + byteorder=byteorder, + time_stamp=time_stamp, + data_label=data_label, + write_index=write_index, + variable_labels=variable_labels, + compression=compression, + storage_options=storage_options, + value_labels=value_labels, + **kwargs, + ) + writer.write_file() + + def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: + """ + Write a DataFrame to the binary Feather format. + + Parameters + ---------- + path : str, path object, file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. If a string or a path, + it will be used as Root Directory path when writing a partitioned dataset. + **kwargs : + Additional keywords passed to :func:`pyarrow.feather.write_feather`. + This includes the `compression`, `compression_level`, `chunksize` + and `version` keywords. + + Notes + ----- + This function writes the dataframe as a `feather file + <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default + index. For saving the DataFrame with your custom index, use a method that + supports custom indices, e.g. `to_parquet`. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) + >>> df.to_feather("file.feather") # doctest: +SKIP + """ + from pandas.io.feather_format import to_feather + + to_feather(self, path, **kwargs) + + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self", "buf"], name="to_markdown" + ) + @doc( + Series.to_markdown, + klass=_shared_doc_kwargs["klass"], + storage_options=_shared_docs["storage_options"], + examples="""Examples + -------- + >>> df = pd.DataFrame( + ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} + ... ) + >>> print(df.to_markdown()) + | | animal_1 | animal_2 | + |---:|:-----------|:-----------| + | 0 | elk | dog | + | 1 | pig | quetzal | + + Output markdown with a tabulate option.
+ + >>> print(df.to_markdown(tablefmt="grid")) + +----+------------+------------+ + | | animal_1 | animal_2 | + +====+============+============+ + | 0 | elk | dog | + +----+------------+------------+ + | 1 | pig | quetzal | + +----+------------+------------+""", + ) + def to_markdown( + self, + buf: FilePath | WriteBuffer[str] | None = None, + mode: str = "wt", + index: bool = True, + storage_options: StorageOptions | None = None, + **kwargs, + ) -> str | None: + if "showindex" in kwargs: + raise ValueError("Pass 'index' instead of 'showindex'") + + kwargs.setdefault("headers", "keys") + kwargs.setdefault("tablefmt", "pipe") + kwargs.setdefault("showindex", index) + tabulate = import_optional_dependency("tabulate") + result = tabulate.tabulate(self, **kwargs) + if buf is None: + return result + + with get_handle(buf, mode, storage_options=storage_options) as handles: + handles.handle.write(result) + return None + + @overload + def to_parquet( + self, + path: None = ..., + engine: Literal["auto", "pyarrow", "fastparquet"] = ..., + compression: str | None = ..., + index: bool | None = ..., + partition_cols: list[str] | None = ..., + storage_options: StorageOptions = ..., + **kwargs, + ) -> bytes: + ... + + @overload + def to_parquet( + self, + path: FilePath | WriteBuffer[bytes], + engine: Literal["auto", "pyarrow", "fastparquet"] = ..., + compression: str | None = ..., + index: bool | None = ..., + partition_cols: list[str] | None = ..., + storage_options: StorageOptions = ..., + **kwargs, + ) -> None: + ... + + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self", "path"], name="to_parquet" + ) + @doc(storage_options=_shared_docs["storage_options"]) + def to_parquet( + self, + path: FilePath | WriteBuffer[bytes] | None = None, + engine: Literal["auto", "pyarrow", "fastparquet"] = "auto", + compression: str | None = "snappy", + index: bool | None = None, + partition_cols: list[str] | None = None, + storage_options: StorageOptions | None = None, + **kwargs, + ) -> bytes | None: + """ + Write a DataFrame to the binary parquet format. + + This function writes the dataframe as a `parquet file + <https://parquet.apache.org/>`_. You can choose different parquet + backends, and have the option of compression. See + :ref:`the user guide <io.parquet>` for more details. + + Parameters + ---------- + path : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. If None, the result is + returned as bytes. If a string or path, it will be used as Root Directory + path when writing a partitioned dataset. + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. + compression : str or None, default 'snappy' + Name of the compression to use. Use ``None`` for no compression. + Supported options: 'snappy', 'gzip', 'brotli', 'lz4', 'zstd'. + index : bool, default None + If ``True``, include the dataframe's index(es) in the file output. + If ``False``, they will not be written to the file. + If ``None``, similar to ``True`` the dataframe's index(es) + will be saved. However, instead of being saved as values, + the RangeIndex will be stored as a range in the metadata so it + doesn't require much space and is faster. Other indexes will + be included as columns in the file output.
+ partition_cols : list, optional, default None + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + Must be None if path is not a string. + {storage_options} + + **kwargs + Additional arguments passed to the parquet library. See + :ref:`pandas io <io.parquet>` for more details. + + Returns + ------- + bytes if no path argument is provided else None + + See Also + -------- + read_parquet : Read a parquet file. + DataFrame.to_orc : Write an orc file. + DataFrame.to_csv : Write a csv file. + DataFrame.to_sql : Write to a sql table. + DataFrame.to_hdf : Write to hdf. + + Notes + ----- + This function requires either the `fastparquet + <https://github.com/dask/fastparquet>`_ or `pyarrow + <https://arrow.apache.org/docs/python/>`_ library. + + Examples + -------- + >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) + >>> df.to_parquet('df.parquet.gzip', + ... compression='gzip') # doctest: +SKIP + >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP + col1 col2 + 0 1 3 + 1 2 4 + + If you want to get a buffer to the parquet content you can use a io.BytesIO + object, as long as you don't use partition_cols, which creates multiple files. + + >>> import io + >>> f = io.BytesIO() + >>> df.to_parquet(f) + >>> f.seek(0) + 0 + >>> content = f.read() + """ + from pandas.io.parquet import to_parquet + + return to_parquet( + self, + path, + engine, + compression=compression, + index=index, + partition_cols=partition_cols, + storage_options=storage_options, + **kwargs, + ) + + def to_orc( + self, + path: FilePath | WriteBuffer[bytes] | None = None, + *, + engine: Literal["pyarrow"] = "pyarrow", + index: bool | None = None, + engine_kwargs: dict[str, Any] | None = None, + ) -> bytes | None: + """ + Write a DataFrame to the ORC format. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + path : str, file-like object or None, default None + If a string, it will be used as Root Directory path + when writing a partitioned dataset. By file-like object, + we refer to objects with a write() method, such as a file handle + (e.g. via builtin open function). If path is None, + a bytes object is returned. + engine : {'pyarrow'}, default 'pyarrow' + ORC library to use. + index : bool, optional + If ``True``, include the dataframe's index(es) in the file output. + If ``False``, they will not be written to the file. + If ``None``, similar to ``infer`` the dataframe's index(es) + will be saved. However, instead of being saved as values, + the RangeIndex will be stored as a range in the metadata so it + doesn't require much space and is faster. Other indexes will + be included as columns in the file output. + engine_kwargs : dict[str, Any] or None, default None + Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. + + Returns + ------- + bytes if no path argument is provided else None + + Raises + ------ + NotImplementedError + Dtype of one or more columns is category, unsigned integers, interval, + period or sparse. + ValueError + engine is not pyarrow. + + See Also + -------- + read_orc : Read an ORC file. + DataFrame.to_parquet : Write a parquet file. + DataFrame.to_csv : Write a csv file. + DataFrame.to_sql : Write to a sql table. + DataFrame.to_hdf : Write to hdf. + + Notes + ----- + * Before using this function you should read the :ref:`user guide about + ORC <io.orc>` and :ref:`install optional dependencies <install.optional_dependencies>`. + * This function requires the `pyarrow <https://arrow.apache.org/docs/python/>`_ + library. + * For supported dtypes please refer to `supported ORC features in Arrow + <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__.
+ * Currently timezones in datetime columns are not preserved when a + dataframe is converted into ORC files. + + Examples + -------- + >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) + >>> df.to_orc('df.orc') # doctest: +SKIP + >>> pd.read_orc('df.orc') # doctest: +SKIP + col1 col2 + 0 1 4 + 1 2 3 + + If you want to get a buffer to the orc content you can write it to io.BytesIO + + >>> import io + >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP + >>> b.seek(0) # doctest: +SKIP + 0 + >>> content = b.read() # doctest: +SKIP + """ + from pandas.io.orc import to_orc + + return to_orc( + self, path, engine=engine, index=index, engine_kwargs=engine_kwargs + ) + + @overload + def to_html( + self, + buf: FilePath | WriteBuffer[str], + columns: Axes | None = ..., + col_space: ColspaceArgType | None = ..., + header: bool = ..., + index: bool = ..., + na_rep: str = ..., + formatters: FormattersType | None = ..., + float_format: FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool | str = ..., + decimal: str = ..., + bold_rows: bool = ..., + classes: str | list | tuple | None = ..., + escape: bool = ..., + notebook: bool = ..., + border: int | bool | None = ..., + table_id: str | None = ..., + render_links: bool = ..., + encoding: str | None = ..., + ) -> None: + ... + + @overload + def to_html( + self, + buf: None = ..., + columns: Axes | None = ..., + col_space: ColspaceArgType | None = ..., + header: bool = ..., + index: bool = ..., + na_rep: str = ..., + formatters: FormattersType | None = ..., + float_format: FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool | str = ..., + decimal: str = ..., + bold_rows: bool = ..., + classes: str | list | tuple | None = ..., + escape: bool = ..., + notebook: bool = ..., + border: int | bool | None = ..., + table_id: str | None = ..., + render_links: bool = ..., + encoding: str | None = ..., + ) -> str: + ... + + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self", "buf"], name="to_html" + ) + @Substitution( + header_type="bool", + header="Whether to print column labels, default True", + col_space_type="str or int, list or dict of int or str", + col_space="The minimum width of each column in CSS length " + "units. An int is assumed to be px units.", + ) + @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) + def to_html( + self, + buf: FilePath | WriteBuffer[str] | None = None, + columns: Axes | None = None, + col_space: ColspaceArgType | None = None, + header: bool = True, + index: bool = True, + na_rep: str = "NaN", + formatters: FormattersType | None = None, + float_format: FloatFormatType | None = None, + sparsify: bool | None = None, + index_names: bool = True, + justify: str | None = None, + max_rows: int | None = None, + max_cols: int | None = None, + show_dimensions: bool | str = False, + decimal: str = ".", + bold_rows: bool = True, + classes: str | list | tuple | None = None, + escape: bool = True, + notebook: bool = False, + border: int | bool | None = None, + table_id: str | None = None, + render_links: bool = False, + encoding: str | None = None, + ) -> str | None: + """ + Render a DataFrame as an HTML table. 
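A small round-trip sketch for ``to_html`` (assumes an HTML parser such as lxml is installed, which ``read_html`` requires):

import io
import pandas as pd

df = pd.DataFrame({"col1": [1, 2], "col2": [4, 3]})
html = df.to_html()
# read_html returns a list of parsed tables; the first column holds the index
back = pd.read_html(io.StringIO(html), index_col=0)[0]
assert back.equals(df)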
+ %(shared_params)s + bold_rows : bool, default True + Make the row labels bold in the output. + classes : str or list or tuple, default None + CSS class(es) to apply to the resulting html table. + escape : bool, default True + Convert the characters <, >, and & to HTML-safe sequences. + notebook : {True, False}, default False + Whether the generated HTML is for IPython Notebook. + border : int + A ``border=border`` attribute is included in the opening + ``<table>`` tag. Default ``pd.options.display.html.border``. + table_id : str, optional + A css id is included in the opening ``<table>`` tag if specified. + render_links : bool, default False + Convert URLs to HTML links. + encoding : str, default "utf-8" + Set character encoding. + %(returns)s + See Also + -------- + to_string : Convert DataFrame to a string. + + Examples + -------- + >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) + >>> html_string = '''<table border="1" class="dataframe"> + ... <thead> + ... <tr style="text-align: right;"> + ... <th></th> + ... <th>col1</th> + ... <th>col2</th> + ... </tr> + ... </thead> + ... <tbody> + ... <tr> + ... <th>0</th> + ... <td>1</td> + ... <td>4</td> + ... </tr> + ... <tr> + ... <th>1</th> + ... <td>2</td> + ... <td>3</td> + ... </tr> + ... </tbody> + ... </table>''' + >>> assert html_string == df.to_html() + """ + if justify is not None and justify not in fmt.VALID_JUSTIFY_PARAMETERS: + raise ValueError("Invalid value for justify parameter") + + formatter = fmt.DataFrameFormatter( + self, + columns=columns, + col_space=col_space, + na_rep=na_rep, + header=header, + index=index, + formatters=formatters, + float_format=float_format, + bold_rows=bold_rows, + sparsify=sparsify, + justify=justify, + index_names=index_names, + escape=escape, + decimal=decimal, + max_rows=max_rows, + max_cols=max_cols, + show_dimensions=show_dimensions, + ) + # TODO: a generic formatter would be in DataFrameFormatter + return fmt.DataFrameRenderer(formatter).to_html( + buf=buf, + classes=classes, + notebook=notebook, + border=border, + encoding=encoding, + table_id=table_id, + render_links=render_links, + ) + + @overload + def to_xml( + self, + path_or_buffer: None = ..., + *, + index: bool = ..., + root_name: str | None = ..., + row_name: str | None = ..., + na_rep: str | None = ..., + attr_cols: list[str] | None = ..., + elem_cols: list[str] | None = ..., + namespaces: dict[str | None, str] | None = ..., + prefix: str | None = ..., + encoding: str = ..., + xml_declaration: bool | None = ..., + pretty_print: bool | None = ..., + parser: XMLParsers | None = ..., + stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., + compression: CompressionOptions = ..., + storage_options: StorageOptions | None = ..., + ) -> str: + ... + + @overload + def to_xml( + self, + path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str], + *, + index: bool = ..., + root_name: str | None = ..., + row_name: str | None = ..., + na_rep: str | None = ..., + attr_cols: list[str] | None = ..., + elem_cols: list[str] | None = ..., + namespaces: dict[str | None, str] | None = ..., + prefix: str | None = ..., + encoding: str = ..., + xml_declaration: bool | None = ..., + pretty_print: bool | None = ..., + parser: XMLParsers | None = ..., + stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., + compression: CompressionOptions = ..., + storage_options: StorageOptions | None = ..., + ) -> None: + ... + + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self", "path_or_buffer"], name="to_xml" + ) + @doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "path_or_buffer", + ) + def to_xml( + self, + path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, + index: bool = True, + root_name: str | None = "data", + row_name: str | None = "row", + na_rep: str | None = None, + attr_cols: list[str] | None = None, + elem_cols: list[str] | None = None, + namespaces: dict[str | None, str] | None = None, + prefix: str | None = None, + encoding: str = "utf-8", + xml_declaration: bool | None = True, + pretty_print: bool | None = True, + parser: XMLParsers | None = "lxml", + stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + ) -> str | None: + """ + Render a DataFrame to an XML document. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + path_or_buffer : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a ``write()`` function. If None, the result is returned + as a string. + index : bool, default True + Whether to include index in XML document.
+ root_name : str, default 'data' + The name of root element in XML document. + row_name : str, default 'row' + The name of row element in XML document. + na_rep : str, optional + Missing data representation. + attr_cols : list-like, optional + List of columns to write as attributes in row element. + Hierarchical columns will be flattened with underscore + delimiting the different levels. + elem_cols : list-like, optional + List of columns to write as children in row element. By default, + all columns output as children of row element. Hierarchical + columns will be flattened with underscore delimiting the + different levels. + namespaces : dict, optional + All namespaces to be defined in root element. Keys of dict + should be prefix names and values of dict corresponding URIs. + Default namespaces should be given an empty string key. For + example, :: + + namespaces = {{"": "https://example.com"}} + + prefix : str, optional + Namespace prefix to be used for every element and/or attribute + in document. This should be one of the keys in ``namespaces`` + dict. + encoding : str, default 'utf-8' + Encoding of the resulting document. + xml_declaration : bool, default True + Whether to include the XML declaration at start of document. + pretty_print : bool, default True + Whether output should be pretty printed with indentation and + line breaks. + parser : {{'lxml','etree'}}, default 'lxml' + Parser module to use for building of tree. Only 'lxml' and + 'etree' are supported. With 'lxml', the ability to use XSLT + stylesheet is supported. + stylesheet : str, path object or file-like object, optional + A URL, file-like object, or a raw string containing an XSLT + script used to transform the raw XML output. Script should use + layout of elements and attributes from original output. This + argument requires ``lxml`` to be installed. Only XSLT 1.0 + scripts, and not later versions, are currently supported. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + {storage_options} + + Returns + ------- + None or str + If ``io`` is None, returns the resulting XML format as a + string. Otherwise returns None. + + See Also + -------- + to_json : Convert the pandas object to a JSON string. + to_html : Convert DataFrame to an HTML table. + + Examples + -------- + >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], + ... 'degrees': [360, 360, 180], + ... 'sides': [4, np.nan, 3]}}) + + >>> df.to_xml() # doctest: +SKIP + <?xml version='1.0' encoding='utf-8'?> + <data> + <row> + <index>0</index> + <shape>square</shape> + <degrees>360</degrees> + <sides>4.0</sides> + </row> + <row> + <index>1</index> + <shape>circle</shape> + <degrees>360</degrees> + <sides/> + </row> + <row> + <index>2</index> + <shape>triangle</shape> + <degrees>180</degrees> + <sides>3.0</sides> + </row> + </data> + + >>> df.to_xml(attr_cols=[ + ... 'index', 'shape', 'degrees', 'sides' + ... ]) # doctest: +SKIP + <?xml version='1.0' encoding='utf-8'?> + <data> + <row index="0" shape="square" degrees="360" sides="4.0"/> + <row index="1" shape="circle" degrees="360"/> + <row index="2" shape="triangle" degrees="180" sides="3.0"/> + </data> + + >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, + ... prefix="doc") # doctest: +SKIP + <?xml version='1.0' encoding='utf-8'?> + <doc:data xmlns:doc="https://example.com"> + <doc:row> + <doc:index>0</doc:index> + <doc:shape>square</doc:shape> + <doc:degrees>360</doc:degrees> + <doc:sides>4.0</doc:sides> + </doc:row> + <doc:row> + <doc:index>1</doc:index> + <doc:shape>circle</doc:shape> + <doc:degrees>360</doc:degrees> + <doc:sides/> + </doc:row> + <doc:row> + <doc:index>2</doc:index> + <doc:shape>triangle</doc:shape> + <doc:degrees>180</doc:degrees> + <doc:sides>3.0</doc:sides> + </doc:row> + </doc:data> + """
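A quick sketch of the parser fallback handled just below: the stdlib ``etree`` backend works without lxml, while XSLT via ``stylesheet`` remains lxml-only:

import pandas as pd

df = pd.DataFrame({"shape": ["square", "circle"], "degrees": [360, 360]})
xml = df.to_xml(parser="etree")  # no lxml required on this path
assert xml.startswith("<?xml version='1.0' encoding='utf-8'?>")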
+ + from pandas.io.formats.xml import ( + EtreeXMLFormatter, + LxmlXMLFormatter, + ) + + lxml = import_optional_dependency("lxml.etree", errors="ignore") + + TreeBuilder: type[EtreeXMLFormatter | LxmlXMLFormatter] + + if parser == "lxml": + if lxml is not None: + TreeBuilder = LxmlXMLFormatter + else: + raise ImportError( + "lxml not found, please install or use the etree parser." + ) + + elif parser == "etree": + TreeBuilder = EtreeXMLFormatter + + else: + raise ValueError("Values for parser can only be lxml or etree.") + + xml_formatter = TreeBuilder( + self, + path_or_buffer=path_or_buffer, + index=index, + root_name=root_name, + row_name=row_name, + na_rep=na_rep, + attr_cols=attr_cols, + elem_cols=elem_cols, + namespaces=namespaces, + prefix=prefix, + encoding=encoding, + xml_declaration=xml_declaration, + pretty_print=pretty_print, + stylesheet=stylesheet, + compression=compression, + storage_options=storage_options, + ) + + return xml_formatter.write_output() + + # ---------------------------------------------------------------------- + @doc(INFO_DOCSTRING, **frame_sub_kwargs) + def info( + self, + verbose: bool | None = None, + buf: WriteBuffer[str] | None = None, + max_cols: int | None = None, + memory_usage: bool | str | None = None, + show_counts: bool | None = None, + ) -> None: + info = DataFrameInfo( + data=self, + memory_usage=memory_usage, + ) + info.render( + buf=buf, + max_cols=max_cols, + verbose=verbose, + show_counts=show_counts, + ) + + def memory_usage(self, index: bool = True, deep: bool = False) -> Series: + """ + Return the memory usage of each column in bytes. + + The memory usage can optionally include the contribution of + the index and elements of `object` dtype. + + This value is displayed in `DataFrame.info` by default. This can be + suppressed by setting ``pandas.options.display.memory_usage`` to False. + + Parameters + ---------- + index : bool, default True + Specifies whether to include the memory usage of the DataFrame's + index in returned Series. If ``index=True``, the memory usage of + the index is the first item in the output. + deep : bool, default False + If True, introspect the data deeply by interrogating + `object` dtypes for system-level memory consumption, and include + it in the returned values. + + Returns + ------- + Series + A Series whose index is the original column names and whose values + are the memory usage of each column in bytes. + + See Also + -------- + numpy.ndarray.nbytes : Total bytes consumed by the elements of an + ndarray. + Series.memory_usage : Bytes consumed by a Series. + Categorical : Memory-efficient array for string values with + many repeated values. + DataFrame.info : Concise summary of a DataFrame. + + Notes + ----- + See the :ref:`Frequently Asked Questions <df-memory-usage>` for more + details. + + Examples + -------- + >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] + >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) + ... for t in dtypes]) + >>> df = pd.DataFrame(data) + >>> df.head() + int64 float64 complex128 object bool + 0 1 1.0 1.0+0.0j 1 True + 1 1 1.0 1.0+0.0j 1 True + 2 1 1.0 1.0+0.0j 1 True + 3 1 1.0 1.0+0.0j 1 True + 4 1 1.0 1.0+0.0j 1 True + + >>> df.memory_usage() + Index 128 + int64 40000 + float64 40000 + complex128 80000 + object 40000 + bool 5000 + dtype: int64 + + >>> df.memory_usage(index=False) + int64 40000 + float64 40000 + complex128 80000 + object 40000 + bool 5000 + dtype: int64 + + The memory footprint of `object` dtype columns is ignored by default: + + >>> df.memory_usage(deep=True) + Index 128 + int64 40000 + float64 40000 + complex128 80000 + object 180000 + bool 5000 + dtype: int64
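A tiny sketch of the ``deep`` flag shown above: shallow accounting counts only the 8-byte object pointers, while ``deep=True`` also interrogates the string payloads:

import pandas as pd

df = pd.DataFrame({"s": ["x", "yy", "zzz"] * 1000})
shallow = df.memory_usage()        # pointer-size estimate for the object column
deep = df.memory_usage(deep=True)  # includes per-string payloads
assert deep["s"] > shallow["s"]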
+ + Use a Categorical for efficient storage of an object-dtype column with + many repeated values. + + >>> df['object'].astype('category').memory_usage(deep=True) + 5244 + """ + result = self._constructor_sliced( + [c.memory_usage(index=False, deep=deep) for col, c in self.items()], + index=self.columns, + dtype=np.intp, + ) + if index: + index_memory_usage = self._constructor_sliced( + self.index.memory_usage(deep=deep), index=["Index"] + ) + result = index_memory_usage._append(result) + return result + + def transpose(self, *args, copy: bool = False) -> DataFrame: + """ + Transpose index and columns. + + Reflect the DataFrame over its main diagonal by writing rows as columns + and vice-versa. The property :attr:`.T` is an accessor to the method + :meth:`transpose`. + + Parameters + ---------- + *args : tuple, optional + Accepted for compatibility with NumPy. + copy : bool, default False + Whether to copy the data after transposing, even for DataFrames + with a single dtype. + + Note that a copy is always required for mixed dtype DataFrames, + or for DataFrames with any extension types. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + + Returns + ------- + DataFrame + The transposed DataFrame. + + See Also + -------- + numpy.transpose : Permute the dimensions of a given array. + + Notes + ----- + Transposing a DataFrame with mixed dtypes will result in a homogeneous + DataFrame with the `object` dtype. In such a case, a copy of the data + is always made. + + Examples + -------- + **Square DataFrame with homogeneous dtype** + + >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} + >>> df1 = pd.DataFrame(data=d1) + >>> df1 + col1 col2 + 0 1 3 + 1 2 4 + + >>> df1_transposed = df1.T # or df1.transpose() + >>> df1_transposed + 0 1 + col1 1 2 + col2 3 4 + + When the dtype is homogeneous in the original DataFrame, we get a + transposed DataFrame with the same dtype: + + >>> df1.dtypes + col1 int64 + col2 int64 + dtype: object + >>> df1_transposed.dtypes + 0 int64 + 1 int64 + dtype: object + + **Non-square DataFrame with mixed dtypes** + + >>> d2 = {'name': ['Alice', 'Bob'], + ... 'score': [9.5, 8], + ... 'employed': [False, True], + ... 'kids': [0, 0]} + >>> df2 = pd.DataFrame(data=d2) + >>> df2 + name score employed kids + 0 Alice 9.5 False 0 + 1 Bob 8.0 True 0 + + >>> df2_transposed = df2.T # or df2.transpose() + >>> df2_transposed + 0 1 + name Alice Bob + score 9.5 8.0 + employed False True + kids 0 0 + + When the DataFrame has mixed dtypes, we get a transposed DataFrame with + the `object` dtype: + + >>> df2.dtypes + name object + score float64 + employed bool + kids int64 + dtype: object + >>> df2_transposed.dtypes + 0 object + 1 object + dtype: object + """ + nv.validate_transpose(args, {}) + # construct the args + + dtypes = list(self.dtypes) + + if self._can_fast_transpose: + # Note: tests pass without this, but this improves perf quite a bit.
+ new_vals = self._values.T + if copy and not using_copy_on_write(): + new_vals = new_vals.copy() + + result = self._constructor( + new_vals, + index=self.columns, + columns=self.index, + copy=False, + dtype=new_vals.dtype, + ) + if using_copy_on_write() and len(self) > 0: + result._mgr.add_references(self._mgr) # type: ignore[arg-type] + + elif ( + self._is_homogeneous_type + and dtypes + and isinstance(dtypes[0], ExtensionDtype) + ): + new_values: list + if isinstance(dtypes[0], BaseMaskedDtype): + # We have masked arrays with the same dtype. We can transpose faster. + from pandas.core.arrays.masked import ( + transpose_homogeneous_masked_arrays, + ) + + new_values = transpose_homogeneous_masked_arrays( + cast(Sequence[BaseMaskedArray], self._iter_column_arrays()) + ) + elif isinstance(dtypes[0], ArrowDtype): + # We have arrow EAs with the same dtype. We can transpose faster. + from pandas.core.arrays.arrow.array import ( + ArrowExtensionArray, + transpose_homogeneous_pyarrow, + ) + + new_values = transpose_homogeneous_pyarrow( + cast(Sequence[ArrowExtensionArray], self._iter_column_arrays()) + ) + else: + # We have other EAs with the same dtype. We preserve dtype in transpose. + dtyp = dtypes[0] + arr_typ = dtyp.construct_array_type() + values = self.values + new_values = [arr_typ._from_sequence(row, dtype=dtyp) for row in values] + + result = type(self)._from_arrays( + new_values, + index=self.columns, + columns=self.index, + verify_integrity=False, + ) + + else: + new_arr = self.values.T + if copy and not using_copy_on_write(): + new_arr = new_arr.copy() + result = self._constructor( + new_arr, + index=self.columns, + columns=self.index, + dtype=new_arr.dtype, + # We already made a copy (more than one block) + copy=False, + ) + + return result.__finalize__(self, method="transpose") + + @property + def T(self) -> DataFrame: + """ + The transpose of the DataFrame. + + Returns + ------- + DataFrame + The transposed DataFrame. + + See Also + -------- + DataFrame.transpose : Transpose index and columns. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df + col1 col2 + 0 1 3 + 1 2 4 + + >>> df.T + 0 1 + col1 1 2 + col2 3 4 + """ + return self.transpose() + + # ---------------------------------------------------------------------- + # Indexing Methods + + def _ixs(self, i: int, axis: AxisInt = 0) -> Series: + """ + Parameters + ---------- + i : int + axis : int + + Returns + ------- + Series + """ + # irow + if axis == 0: + new_mgr = self._mgr.fast_xs(i) + + # if we are a copy, mark as such + copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None + result = self._constructor_sliced_from_mgr(new_mgr, axes=new_mgr.axes) + result._name = self.index[i] + result = result.__finalize__(self) + result._set_is_copy(self, copy=copy) + return result + + # icol + else: + label = self.columns[i] + + col_mgr = self._mgr.iget(i) + result = self._box_col_values(col_mgr, i) + + # this is a cached value, mark it so + result._set_as_cached(label, self) + return result + + def _get_column_array(self, i: int) -> ArrayLike: + """ + Get the values of the i'th column (ndarray or ExtensionArray, as stored + in the Block) + + Warning! The returned array is a view but doesn't handle Copy-on-Write, + so this should be used with caution (for read-only purposes). + """ + return self._mgr.iget_values(i) + + def _iter_column_arrays(self) -> Iterator[ArrayLike]: + """ + Iterate over the arrays of all columns in order. 
+ This returns the values as stored in the Block (ndarray or ExtensionArray). + + Warning! The returned array is a view but doesn't handle Copy-on-Write, + so this should be used with caution (for read-only purposes). + """ + if isinstance(self._mgr, ArrayManager): + yield from self._mgr.arrays + else: + for i in range(len(self.columns)): + yield self._get_column_array(i) + + def _getitem_nocopy(self, key: list): + """ + Behaves like __getitem__, but returns a view in cases where __getitem__ + would make a copy. + """ + # TODO(CoW): can be removed if/when we are always Copy-on-Write + indexer = self.columns._get_indexer_strict(key, "columns")[1] + new_axis = self.columns[indexer] + + new_mgr = self._mgr.reindex_indexer( + new_axis, + indexer, + axis=0, + allow_dups=True, + copy=False, + only_slice=True, + ) + result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + result = result.__finalize__(self) + return result + + def __getitem__(self, key): + check_dict_or_set_indexers(key) + key = lib.item_from_zerodim(key) + key = com.apply_if_callable(key, self) + + if is_hashable(key) and not is_iterator(key): + # is_iterator to exclude generator e.g. test_getitem_listlike + # shortcut if the key is in columns + is_mi = isinstance(self.columns, MultiIndex) + # GH#45316 Return view if key is not duplicated + # Only use drop_duplicates with duplicates for performance + if not is_mi and ( + self.columns.is_unique + and key in self.columns + or key in self.columns.drop_duplicates(keep=False) + ): + return self._get_item_cache(key) + + elif is_mi and self.columns.is_unique and key in self.columns: + return self._getitem_multilevel(key) + + # Do we have a slicer (on rows)? + if isinstance(key, slice): + return self._getitem_slice(key) + + # Do we have a (boolean) DataFrame? + if isinstance(key, DataFrame): + return self.where(key) + + # Do we have a (boolean) 1d indexer? + if com.is_bool_indexer(key): + return self._getitem_bool_array(key) + + # We are left with two options: a single key, and a collection of keys, + # We interpret tuples as collections only for non-MultiIndex + is_single_key = isinstance(key, tuple) or not is_list_like(key) + + if is_single_key: + if self.columns.nlevels > 1: + return self._getitem_multilevel(key) + indexer = self.columns.get_loc(key) + if is_integer(indexer): + indexer = [indexer] + else: + if is_iterator(key): + key = list(key) + indexer = self.columns._get_indexer_strict(key, "columns")[1] + + # take() does not accept boolean indexers + if getattr(indexer, "dtype", None) == bool: + indexer = np.where(indexer)[0] + + if isinstance(indexer, slice): + return self._slice(indexer, axis=1) + + data = self._take_with_is_copy(indexer, axis=1) + + if is_single_key: + # What does looking for a single key in a non-unique index return? + # The behavior is inconsistent. 
It returns a Series, except when + # - the key itself is repeated (test on data.shape, #9519), or + # - we have a MultiIndex on columns (test on self.columns, #21309) + if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): + # GH#26490 using data[key] can cause RecursionError + return data._get_item_cache(key) + + return data + + def _getitem_bool_array(self, key): + # also raises Exception if object array with NA values + # warning here just in case -- previously __setitem__ was + # reindexing but __getitem__ was not; it seems more reasonable to + # go with the __setitem__ behavior since that is more consistent + # with all other indexing behavior + if isinstance(key, Series) and not key.index.equals(self.index): + warnings.warn( + "Boolean Series key will be reindexed to match DataFrame index.", + UserWarning, + stacklevel=find_stack_level(), + ) + elif len(key) != len(self.index): + raise ValueError( + f"Item wrong length {len(key)} instead of {len(self.index)}." + ) + + # check_bool_indexer will throw exception if Series key cannot + # be reindexed to match DataFrame rows + key = check_bool_indexer(self.index, key) + + if key.all(): + return self.copy(deep=None) + + indexer = key.nonzero()[0] + return self._take_with_is_copy(indexer, axis=0) + + def _getitem_multilevel(self, key): + # self.columns is a MultiIndex + loc = self.columns.get_loc(key) + if isinstance(loc, (slice, np.ndarray)): + new_columns = self.columns[loc] + result_columns = maybe_droplevels(new_columns, key) + result = self.iloc[:, loc] + result.columns = result_columns + + # If there is only one column being returned, and its name is + # either an empty string, or a tuple with an empty string as its + # first element, then treat the empty string as a placeholder + # and return the column as if the user had provided that empty + # string in the key. If the result is a Series, exclude the + # implied empty string from its name. + if len(result.columns) == 1: + # e.g. test_frame_getitem_multicolumn_empty_level, + # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice + top = result.columns[0] + if isinstance(top, tuple): + top = top[0] + if top == "": + result = result[""] + if isinstance(result, Series): + result = self._constructor_sliced( + result, index=self.index, name=key + ) + + result._set_is_copy(self) + return result + else: + # loc is neither a slice nor ndarray, so must be an int + return self._ixs(loc, axis=1)
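A minimal sketch of the boolean-mask path implemented in ``_getitem_bool_array`` above: a boolean ``Series`` key is aligned (reindexed) to the frame's index before rows are selected:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]}, index=["x", "y", "z"])
mask = pd.Series([True, False, True], index=["x", "y", "z"])
assert list(df[mask]["a"]) == [1, 3]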
+ """ + if takeable: + series = self._ixs(col, axis=1) + return series._values[index] + + series = self._get_item_cache(col) + engine = self.index._engine + + if not isinstance(self.index, MultiIndex): + # CategoricalIndex: Trying to use the engine fastpath may give incorrect + # results if our categories are integers that dont match our codes + # IntervalIndex: IntervalTree has no get_loc + row = self.index.get_loc(index) + return series._values[row] + + # For MultiIndex going through engine effectively restricts us to + # same-length tuples; see test_get_set_value_no_partial_indexing + loc = engine.get_loc(index) + return series._values[loc] + + def isetitem(self, loc, value) -> None: + """ + Set the given value in the column with position `loc`. + + This is a positional analogue to ``__setitem__``. + + Parameters + ---------- + loc : int or sequence of ints + Index position for the column. + value : scalar or arraylike + Value(s) for the column. + + Notes + ----- + ``frame.isetitem(loc, value)`` is an in-place method as it will + modify the DataFrame in place (not returning a new object). In contrast to + ``frame.iloc[:, i] = value`` which will try to update the existing values in + place, ``frame.isetitem(loc, value)`` will not update the values of the column + itself in place, it will instead insert a new array. + + In cases where ``frame.columns`` is unique, this is equivalent to + ``frame[frame.columns[i]] = value``. + """ + if isinstance(value, DataFrame): + if is_integer(loc): + loc = [loc] + + if len(loc) != len(value.columns): + raise ValueError( + f"Got {len(loc)} positions but value has {len(value.columns)} " + f"columns." + ) + + for i, idx in enumerate(loc): + arraylike, refs = self._sanitize_column(value.iloc[:, i]) + self._iset_item_mgr(idx, arraylike, inplace=False, refs=refs) + return + + arraylike, refs = self._sanitize_column(value) + self._iset_item_mgr(loc, arraylike, inplace=False, refs=refs) + + def __setitem__(self, key, value) -> None: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= 3: + warnings.warn( + _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 + ) + elif not PYPY and not using_copy_on_write(): + if sys.getrefcount(self) <= 3 and ( + warn_copy_on_write() + or ( + not warn_copy_on_write() + and any(b.refs.has_reference() for b in self._mgr.blocks) # type: ignore[union-attr] + ) + ): + warnings.warn( + _chained_assignment_warning_msg, FutureWarning, stacklevel=2 + ) + + key = com.apply_if_callable(key, self) + + # see if we can slice the rows + if isinstance(key, slice): + slc = self.index._convert_slice_indexer(key, kind="getitem") + return self._setitem_slice(slc, value) + + if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: + self._setitem_frame(key, value) + elif isinstance(key, (Series, np.ndarray, list, Index)): + self._setitem_array(key, value) + elif isinstance(value, DataFrame): + self._set_item_frame_value(key, value) + elif ( + is_list_like(value) + and not self.columns.is_unique + and 1 < len(self.columns.get_indexer_for([key])) == len(value) + ): + # Column to set is duplicated + self._setitem_array([key], value) + else: + # set column + self._set_item(key, value) + + def _setitem_slice(self, key: slice, value) -> None: + # NB: we can't just use self.loc[key] = value because that + # operates on labels and we need to operate positional for + # backwards-compat, xref GH#31469 + self._check_setitem_copy() + self.iloc[key] = value + + def _setitem_array(self, key, value): + # also raises Exception 
if object array with NA values + if com.is_bool_indexer(key): + # bool indexer is indexing along rows + if len(key) != len(self.index): + raise ValueError( + f"Item wrong length {len(key)} instead of {len(self.index)}!" + ) + key = check_bool_indexer(self.index, key) + indexer = key.nonzero()[0] + self._check_setitem_copy() + if isinstance(value, DataFrame): + # GH#39931 reindex since iloc does not align + value = value.reindex(self.index.take(indexer)) + self.iloc[indexer] = value + + else: + # Note: unlike self.iloc[:, indexer] = value, this will + # never try to overwrite values inplace + + if isinstance(value, DataFrame): + check_key_length(self.columns, key, value) + for k1, k2 in zip(key, value.columns): + self[k1] = value[k2] + + elif not is_list_like(value): + for col in key: + self[col] = value + + elif isinstance(value, np.ndarray) and value.ndim == 2: + self._iset_not_inplace(key, value) + + elif np.ndim(value) > 1: + # list of lists + value = DataFrame(value).values + return self._setitem_array(key, value) + + else: + self._iset_not_inplace(key, value) + + def _iset_not_inplace(self, key, value): + # GH#39510 when setting with df[key] = obj with a list-like key and + # list-like value, we iterate over those listlikes and set columns + # one at a time. This is different from dispatching to + # `self.loc[:, key]= value` because loc.__setitem__ may overwrite + # data inplace, whereas this will insert new arrays. + + def igetitem(obj, i: int): + # Note: we catch DataFrame obj before getting here, but + # hypothetically would return obj.iloc[:, i] + if isinstance(obj, np.ndarray): + return obj[..., i] + else: + return obj[i] + + if self.columns.is_unique: + if np.shape(value)[-1] != len(key): + raise ValueError("Columns must be same length as key") + + for i, col in enumerate(key): + self[col] = igetitem(value, i) + + else: + ilocs = self.columns.get_indexer_non_unique(key)[0] + if (ilocs < 0).any(): + # key entries not in self.columns + raise NotImplementedError + + if np.shape(value)[-1] != len(ilocs): + raise ValueError("Columns must be same length as key") + + assert np.ndim(value) <= 2 + + orig_columns = self.columns + + # Using self.iloc[:, i] = ... may set values inplace, which + # by convention we do not do in __setitem__ + try: + self.columns = Index(range(len(self.columns))) + for i, iloc in enumerate(ilocs): + self[iloc] = igetitem(value, i) + finally: + self.columns = orig_columns + + def _setitem_frame(self, key, value): + # support boolean setting with DataFrame input, e.g. 
+ # df[df > df2] = 0 + if isinstance(key, np.ndarray): + if key.shape != self.shape: + raise ValueError("Array conditional must be same shape as self") + key = self._constructor(key, **self._construct_axes_dict(), copy=False) + + if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): + raise TypeError( + "Must pass DataFrame or 2-d ndarray with boolean values only" + ) + + self._check_setitem_copy() + self._where(-key, value, inplace=True) + + def _set_item_frame_value(self, key, value: DataFrame) -> None: + self._ensure_valid_index(value) + + # align columns + if key in self.columns: + loc = self.columns.get_loc(key) + cols = self.columns[loc] + len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) + if len_cols != len(value.columns): + raise ValueError("Columns must be same length as key") + + # align right-hand-side columns if self.columns + # is multi-index and self[key] is a sub-frame + if isinstance(self.columns, MultiIndex) and isinstance( + loc, (slice, Series, np.ndarray, Index) + ): + cols_droplevel = maybe_droplevels(cols, key) + if len(cols_droplevel) and not cols_droplevel.equals(value.columns): + value = value.reindex(cols_droplevel, axis=1) + + for col, col_droplevel in zip(cols, cols_droplevel): + self[col] = value[col_droplevel] + return + + if is_scalar(cols): + self[cols] = value[value.columns[0]] + return + + locs: np.ndarray | list + if isinstance(loc, slice): + locs = np.arange(loc.start, loc.stop, loc.step) + elif is_scalar(loc): + locs = [loc] + else: + locs = loc.nonzero()[0] + + return self.isetitem(locs, value) + + if len(value.columns) > 1: + raise ValueError( + "Cannot set a DataFrame with multiple columns to the single " + f"column {key}" + ) + elif len(value.columns) == 0: + raise ValueError( + f"Cannot set a DataFrame without columns to the column {key}" + ) + + self[key] = value[value.columns[0]] + + def _iset_item_mgr( + self, + loc: int | slice | np.ndarray, + value, + inplace: bool = False, + refs: BlockValuesRefs | None = None, + ) -> None: + # when called from _set_item_mgr loc can be anything returned from get_loc + self._mgr.iset(loc, value, inplace=inplace, refs=refs) + self._clear_item_cache() + + def _set_item_mgr( + self, key, value: ArrayLike, refs: BlockValuesRefs | None = None + ) -> None: + try: + loc = self._info_axis.get_loc(key) + except KeyError: + # This item wasn't present, just insert at end + self._mgr.insert(len(self._info_axis), key, value, refs) + else: + self._iset_item_mgr(loc, value, refs=refs) + + # check if we are modifying a copy + # try to set first as we want an invalid + # value exception to occur first + if len(self): + self._check_setitem_copy() + + def _iset_item(self, loc: int, value: Series, inplace: bool = True) -> None: + # We are only called from _replace_columnwise which guarantees that + # no reindex is necessary + if using_copy_on_write(): + self._iset_item_mgr( + loc, value._values, inplace=inplace, refs=value._references + ) + else: + self._iset_item_mgr(loc, value._values.copy(), inplace=True) + + # check if we are modifying a copy + # try to set first as we want an invalid + # value exception to occur first + if len(self): + self._check_setitem_copy() + + def _set_item(self, key, value) -> None: + """ + Add series to DataFrame in specified column. + + If series is a numpy-array (not a Series/TimeSeries), it must be the + same length as the DataFrames index or an error will be thrown. + + Series/TimeSeries will be conformed to the DataFrames index to + ensure homogeneity. 
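+ + A minimal sketch of that conforming step (assuming a frame ``df`` + with a default integer index; the assigned Series is aligned by + label, not by position): + + >>> df = pd.DataFrame({"A": [10, 20]}) + >>> df["B"] = pd.Series([1, 2], index=[1, 0]) + >>> df + A B + 0 10 2 + 1 20 1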
+ """ + value, refs = self._sanitize_column(value) + + if ( + key in self.columns + and value.ndim == 1 + and not isinstance(value.dtype, ExtensionDtype) + ): + # broadcast across multiple columns if necessary + if not self.columns.is_unique or isinstance(self.columns, MultiIndex): + existing_piece = self[key] + if isinstance(existing_piece, DataFrame): + value = np.tile(value, (len(existing_piece.columns), 1)).T + refs = None + + self._set_item_mgr(key, value, refs) + + def _set_value( + self, index: IndexLabel, col, value: Scalar, takeable: bool = False + ) -> None: + """ + Put single value at passed column and index. + + Parameters + ---------- + index : Label + row label + col : Label + column label + value : scalar + takeable : bool, default False + Sets whether or not index/col interpreted as indexers + """ + try: + if takeable: + icol = col + iindex = cast(int, index) + else: + icol = self.columns.get_loc(col) + iindex = self.index.get_loc(index) + self._mgr.column_setitem(icol, iindex, value, inplace_only=True) + self._clear_item_cache() + + except (KeyError, TypeError, ValueError, LossySetitemError): + # get_loc might raise a KeyError for missing labels (falling back + # to (i)loc will do expansion of the index) + # column_setitem will do validation that may raise TypeError, + # ValueError, or LossySetitemError + # set using a non-recursive method & reset the cache + if takeable: + self.iloc[index, col] = value + else: + self.loc[index, col] = value + self._item_cache.pop(col, None) + + except InvalidIndexError as ii_err: + # GH48729: Seems like you are trying to assign a value to a + # row when only scalar options are permitted + raise InvalidIndexError( + f"You can only assign a scalar value not a {type(value)}" + ) from ii_err + + def _ensure_valid_index(self, value) -> None: + """ + Ensure that if we don't have an index, that we can create one from the + passed value. + """ + # GH5632, make sure that we are a Series convertible + if not len(self.index) and is_list_like(value) and len(value): + if not isinstance(value, DataFrame): + try: + value = Series(value) + except (ValueError, NotImplementedError, TypeError) as err: + raise ValueError( + "Cannot set a frame with no defined index " + "and a value that cannot be converted to a Series" + ) from err + + # GH31368 preserve name of index + index_copy = value.index.copy() + if self.index.name is not None: + index_copy.name = self.index.name + + self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) + + def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: + """ + Provide boxed values for a column. + """ + # Lookup in columns so that if e.g. a str datetime was passed + # we attach the Timestamp object as the name. 
+ name = self.columns[loc] + # We get index=self.index bc values is a SingleDataManager + obj = self._constructor_sliced_from_mgr(values, axes=values.axes) + obj._name = name + return obj.__finalize__(self) + + # ---------------------------------------------------------------------- + # Lookup Caching + + def _clear_item_cache(self) -> None: + self._item_cache.clear() + + def _get_item_cache(self, item: Hashable) -> Series: + """Return the cached item, item represents a label indexer.""" + if using_copy_on_write() or warn_copy_on_write(): + loc = self.columns.get_loc(item) + return self._ixs(loc, axis=1) + + cache = self._item_cache + res = cache.get(item) + if res is None: + # All places that call _get_item_cache have unique columns, + # pending resolution of GH#33047 + + loc = self.columns.get_loc(item) + res = self._ixs(loc, axis=1) + + cache[item] = res + + # for a chain + res._is_copy = self._is_copy + return res + + def _reset_cacher(self) -> None: + # no-op for DataFrame + pass + + def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: + """ + The object has called back to us saying maybe it has changed. + """ + loc = self._info_axis.get_loc(item) + arraylike = value._values + + old = self._ixs(loc, axis=1) + if old._values is value._values and inplace: + # GH#46149 avoid making unnecessary copies/block-splitting + return + + self._mgr.iset(loc, arraylike, inplace=inplace) + + # ---------------------------------------------------------------------- + # Unsorted + + @overload + def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: + ... + + @overload + def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: + ... + + @overload + def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: + ... + + def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: + """ + Query the columns of a DataFrame with a boolean expression. + + Parameters + ---------- + expr : str + The query string to evaluate. + + You can refer to variables + in the environment by prefixing them with an '@' character like + ``@a + b``. + + You can refer to column names that are not valid Python variable names + by surrounding them in backticks. Thus, column names containing spaces + or punctuations (besides underscores) or starting with digits must be + surrounded by backticks. (For example, a column named "Area (cm^2)" would + be referenced as ```Area (cm^2)```). Column names which are Python keywords + (like "list", "for", "import", etc) cannot be used. + + For example, if one of your columns is called ``a a`` and you want + to sum it with ``b``, your query should be ```a a` + b``. + + inplace : bool + Whether to modify the DataFrame rather than creating a new one. + **kwargs + See the documentation for :func:`eval` for complete details + on the keyword arguments accepted by :meth:`DataFrame.query`. + + Returns + ------- + DataFrame or None + DataFrame resulting from the provided query expression or + None if ``inplace=True``. + + See Also + -------- + eval : Evaluate a string describing operations on + DataFrame columns. + DataFrame.eval : Evaluate a string describing operations on + DataFrame columns. + + Notes + ----- + The result of the evaluation of this expression is first passed to + :attr:`DataFrame.loc` and if that fails because of a + multidimensional key (e.g., a DataFrame) then the result will be passed + to :meth:`DataFrame.__getitem__`. 
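+ + As a quick sketch of the ``@`` environment-variable reference + described under ``expr`` above (assuming a local variable ``limit``): + + >>> limit = 3 + >>> pd.DataFrame({"A": [1, 5]}).query("A > @limit") + A + 1 5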
+ + This method uses the top-level :func:`eval` function to + evaluate the passed query. + + The :meth:`~pandas.DataFrame.query` method uses a slightly + modified Python syntax by default. For example, the ``&`` and ``|`` + (bitwise) operators have the precedence of their boolean cousins, + :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, + however the semantics are different. + + You can change the semantics of the expression by passing the keyword + argument ``parser='python'``. This enforces the same semantics as + evaluation in Python space. Likewise, you can pass ``engine='python'`` + to evaluate an expression using Python itself as a backend. This is not + recommended as it is inefficient compared to using ``numexpr`` as the + engine. + + The :attr:`DataFrame.index` and + :attr:`DataFrame.columns` attributes of the + :class:`~pandas.DataFrame` instance are placed in the query namespace + by default, which allows you to treat both the index and columns of the + frame as a column in the frame. + The identifier ``index`` is used for the frame index; you can also + use the name of the index to identify it in a query. Please note that + Python keywords may not be used as identifiers. + + For further details and examples see the ``query`` documentation in + :ref:`indexing <indexing.query>`. + + *Backtick quoted variables* + + Backtick quoted variables are parsed as literal Python code and + are converted internally to a Python valid identifier. + This can lead to the following problems. + + During parsing a number of disallowed characters inside the backtick + quoted string are replaced by strings that are allowed as a Python identifier. + These characters include all operators in Python, the space character, the + question mark, the exclamation mark, the dollar sign, and the euro sign. + For other characters that fall outside the ASCII range (U+0001..U+007F) + and those that are not further specified in PEP 3131, + the query parser will raise an error. + This excludes whitespace different than the space character, + but also the hashtag (as it is used for comments) and the backtick + itself (backtick can also not be escaped). + + In a special case, quotes that make a pair around a backtick can + confuse the parser. + For example, ```it's` > `that's``` will raise an error, + as it forms a quoted string (``'s > `that'``) with a backtick inside. + + See also the Python documentation about lexical analysis + (https://docs.python.org/3/reference/lexical_analysis.html) + in combination with the source code in :mod:`pandas.core.computation.parsing`. + + Examples + -------- + >>> df = pd.DataFrame({'A': range(1, 6), + ... 'B': range(10, 0, -2), + ... 'C C': range(10, 5, -1)}) + >>> df + A B C C + 0 1 10 10 + 1 2 8 9 + 2 3 6 8 + 3 4 4 7 + 4 5 2 6 + >>> df.query('A > B') + A B C C + 4 5 2 6 + + The previous expression is equivalent to + + >>> df[df.A > df.B] + A B C C + 4 5 2 6 + + For columns with spaces in their name, you can use backtick quoting.
+ + >>> df.query('B == `C C`') + A B C C + 0 1 10 10 + + The previous expression is equivalent to + + >>> df[df.B == df['C C']] + A B C C + 0 1 10 10 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if not isinstance(expr, str): + msg = f"expr must be a string to be evaluated, {type(expr)} given" + raise ValueError(msg) + kwargs["level"] = kwargs.pop("level", 0) + 1 + kwargs["target"] = None + res = self.eval(expr, **kwargs) + + try: + result = self.loc[res] + except ValueError: + # when res is multi-dimensional loc raises, but this is sometimes a + # valid query + result = self[res] + + if inplace: + self._update_inplace(result) + return None + else: + return result + + @overload + def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: + ... + + @overload + def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: + ... + + def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: + """ + Evaluate a string describing operations on DataFrame columns. + + Operates on columns only, not specific rows or elements. This allows + `eval` to run arbitrary code, which can make you vulnerable to code + injection if you pass user input to this function. + + Parameters + ---------- + expr : str + The expression string to evaluate. + inplace : bool, default False + If the expression contains an assignment, whether to perform the + operation inplace and mutate the existing DataFrame. Otherwise, + a new DataFrame is returned. + **kwargs + See the documentation for :func:`eval` for complete details + on the keyword arguments accepted by + :meth:`~pandas.DataFrame.query`. + + Returns + ------- + ndarray, scalar, pandas object, or None + The result of the evaluation or None if ``inplace=True``. + + See Also + -------- + DataFrame.query : Evaluates a boolean expression to query the columns + of a frame. + DataFrame.assign : Can evaluate an expression or function to create new + values for a column. + eval : Evaluate a Python expression as a string using various + backends. + + Notes + ----- + For more details see the API documentation for :func:`~eval`. + For detailed examples see :ref:`enhancing performance with eval + `. + + Examples + -------- + >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) + >>> df + A B + 0 1 10 + 1 2 8 + 2 3 6 + 3 4 4 + 4 5 2 + >>> df.eval('A + B') + 0 11 + 1 10 + 2 9 + 3 8 + 4 7 + dtype: int64 + + Assignment is allowed though by default the original DataFrame is not + modified. + + >>> df.eval('C = A + B') + A B C + 0 1 10 11 + 1 2 8 10 + 2 3 6 9 + 3 4 4 8 + 4 5 2 7 + >>> df + A B + 0 1 10 + 1 2 8 + 2 3 6 + 3 4 4 + 4 5 2 + + Multiple columns can be assigned to using multi-line expressions: + + >>> df.eval( + ... ''' + ... C = A + B + ... D = A - B + ... ''' + ... ) + A B C D + 0 1 10 11 -9 + 1 2 8 10 -6 + 2 3 6 9 -3 + 3 4 4 8 0 + 4 5 2 7 3 + """ + from pandas.core.computation.eval import eval as _eval + + inplace = validate_bool_kwarg(inplace, "inplace") + kwargs["level"] = kwargs.pop("level", 0) + 1 + index_resolvers = self._get_index_resolvers() + column_resolvers = self._get_cleaned_column_resolvers() + resolvers = column_resolvers, index_resolvers + if "target" not in kwargs: + kwargs["target"] = self + kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers + + return _eval(expr, inplace=inplace, **kwargs) + + def select_dtypes(self, include=None, exclude=None) -> Self: + """ + Return a subset of the DataFrame's columns based on the column dtypes. 
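+ + For instance, a minimal sketch of the common numeric selection, + assuming a hypothetical frame ``df`` (``'number'`` covers both the + integer and the float columns): + + >>> df = pd.DataFrame({"a": [1], "b": [1.5], "c": ["x"]}) + >>> df.select_dtypes(include="number").columns.tolist() + ['a', 'b']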
+ + Parameters + ---------- + include, exclude : scalar or list-like + A selection of dtypes or strings to be included/excluded. At least + one of these parameters must be supplied. + + Returns + ------- + DataFrame + The subset of the frame including the dtypes in ``include`` and + excluding the dtypes in ``exclude``. + + Raises + ------ + ValueError + * If both of ``include`` and ``exclude`` are empty + * If ``include`` and ``exclude`` have overlapping elements + * If any kind of string dtype is passed in. + + See Also + -------- + DataFrame.dtypes: Return Series with the data type of each column. + + Notes + ----- + * To select all *numeric* types, use ``np.number`` or ``'number'`` + * To select strings you must use the ``object`` dtype, but note that + this will return *all* object dtype columns + * See the `numpy dtype hierarchy + <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ + * To select datetimes, use ``np.datetime64``, ``'datetime'`` or + ``'datetime64'`` + * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or + ``'timedelta64'`` + * To select Pandas categorical dtypes, use ``'category'`` + * To select Pandas datetimetz dtypes, use ``'datetimetz'`` + or ``'datetime64[ns, tz]'`` + + Examples + -------- + >>> df = pd.DataFrame({'a': [1, 2] * 3, + ... 'b': [True, False] * 3, + ... 'c': [1.0, 2.0] * 3}) + >>> df + a b c + 0 1 True 1.0 + 1 2 False 2.0 + 2 1 True 1.0 + 3 2 False 2.0 + 4 1 True 1.0 + 5 2 False 2.0 + + >>> df.select_dtypes(include='bool') + b + 0 True + 1 False + 2 True + 3 False + 4 True + 5 False + + >>> df.select_dtypes(include=['float64']) + c + 0 1.0 + 1 2.0 + 2 1.0 + 3 2.0 + 4 1.0 + 5 2.0 + + >>> df.select_dtypes(exclude=['int64']) + b c + 0 True 1.0 + 1 False 2.0 + 2 True 1.0 + 3 False 2.0 + 4 True 1.0 + 5 False 2.0 + """ + if not is_list_like(include): + include = (include,) if include is not None else () + if not is_list_like(exclude): + exclude = (exclude,) if exclude is not None else () + + selection = (frozenset(include), frozenset(exclude)) + + if not any(selection): + raise ValueError("at least one of include or exclude must be nonempty") + + # convert the myriad valid dtypes object to a single representation + def check_int_infer_dtype(dtypes): + converted_dtypes: list[type] = [] + for dtype in dtypes: + # Numpy maps int to different types (int32, int64) on Windows and Linux + # see https://github.com/numpy/numpy/issues/9464 + if (isinstance(dtype, str) and dtype == "int") or (dtype is int): + converted_dtypes.append(np.int32) + converted_dtypes.append(np.int64) + elif dtype == "float" or dtype is float: + # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 + converted_dtypes.extend([np.float64, np.float32]) + else: + converted_dtypes.append(infer_dtype_from_object(dtype)) + return frozenset(converted_dtypes) + + include = check_int_infer_dtype(include) + exclude = check_int_infer_dtype(exclude) + + for dtypes in (include, exclude): + invalidate_string_dtypes(dtypes) + + # can't both include AND exclude!
+ if not include.isdisjoint(exclude): + raise ValueError(f"include and exclude overlap on {(include & exclude)}") + + def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: + # GH 46870: BooleanDtype._is_numeric == True but should be excluded + dtype = dtype if not isinstance(dtype, ArrowDtype) else dtype.numpy_dtype + return issubclass(dtype.type, tuple(dtypes_set)) or ( + np.number in dtypes_set + and getattr(dtype, "_is_numeric", False) + and not is_bool_dtype(dtype) + ) + + def predicate(arr: ArrayLike) -> bool: + dtype = arr.dtype + if include: + if not dtype_predicate(dtype, include): + return False + + if exclude: + if dtype_predicate(dtype, exclude): + return False + + return True + + mgr = self._mgr._get_data_subset(predicate).copy(deep=None) + # error: Incompatible return value type (got "DataFrame", expected "Self") + return self._constructor_from_mgr(mgr, axes=mgr.axes).__finalize__(self) # type: ignore[return-value] + + def insert( + self, + loc: int, + column: Hashable, + value: Scalar | AnyArrayLike, + allow_duplicates: bool | lib.NoDefault = lib.no_default, + ) -> None: + """ + Insert column into DataFrame at specified location. + + Raises a ValueError if `column` is already contained in the DataFrame, + unless `allow_duplicates` is set to True. + + Parameters + ---------- + loc : int + Insertion index. Must verify 0 <= loc <= len(columns). + column : str, number, or hashable object + Label of the inserted column. + value : Scalar, Series, or array-like + Content of the inserted column. + allow_duplicates : bool, optional, default lib.no_default + Allow duplicate column labels to be created. + + See Also + -------- + Index.insert : Insert new item by index. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df + col1 col2 + 0 1 3 + 1 2 4 + >>> df.insert(1, "newcol", [99, 99]) + >>> df + col1 newcol col2 + 0 1 99 3 + 1 2 99 4 + >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) + >>> df + col1 col1 newcol col2 + 0 100 1 99 3 + 1 100 2 99 4 + + Notice that pandas uses index alignment in case of `value` from type `Series`: + + >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) + >>> df + col0 col1 col1 newcol col2 + 0 NaN 100 1 99 3 + 1 5.0 100 2 99 4 + """ + if allow_duplicates is lib.no_default: + allow_duplicates = False + if allow_duplicates and not self.flags.allows_duplicate_labels: + raise ValueError( + "Cannot specify 'allow_duplicates=True' when " + "'self.flags.allows_duplicate_labels' is False." + ) + if not allow_duplicates and column in self.columns: + # Should this be a different kind of error?? + raise ValueError(f"cannot insert {column}, already exists") + if not is_integer(loc): + raise TypeError("loc must be int") + # convert non stdlib ints to satisfy typing checks + loc = int(loc) + if isinstance(value, DataFrame) and len(value.columns) > 1: + raise ValueError( + f"Expected a one-dimensional object, got a DataFrame with " + f"{len(value.columns)} columns instead." + ) + elif isinstance(value, DataFrame): + value = value.iloc[:, 0] + + value, refs = self._sanitize_column(value) + self._mgr.insert(loc, column, value, refs=refs) + + def assign(self, **kwargs) -> DataFrame: + r""" + Assign new columns to a DataFrame. + + Returns a new object with all original columns in addition to new ones. + Existing columns that are re-assigned will be overwritten. + + Parameters + ---------- + **kwargs : dict of {str: callable or Series} + The column names are keywords. 
If the values are + callable, they are computed on the DataFrame and + assigned to the new columns. The callable must not + change input DataFrame (though pandas doesn't check it). + If the values are not callable, (e.g. a Series, scalar, or array), + they are simply assigned. + + Returns + ------- + DataFrame + A new DataFrame with the new columns in addition to + all the existing columns. + + Notes + ----- + Assigning multiple columns within the same ``assign`` is possible. + Later items in '\*\*kwargs' may refer to newly created or modified + columns in 'df'; items are computed and assigned into 'df' in order. + + Examples + -------- + >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, + ... index=['Portland', 'Berkeley']) + >>> df + temp_c + Portland 17.0 + Berkeley 25.0 + + Where the value is a callable, evaluated on `df`: + + >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + Alternatively, the same behavior can be achieved by directly + referencing an existing Series or sequence: + + >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + You can create multiple columns within the same assign where one + of the columns depends on another one defined within the same assign: + + >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, + ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) + temp_c temp_f temp_k + Portland 17.0 62.6 290.15 + Berkeley 25.0 77.0 298.15 + """ + data = self.copy(deep=None) + + for k, v in kwargs.items(): + data[k] = com.apply_if_callable(v, data) + return data + + def _sanitize_column(self, value) -> tuple[ArrayLike, BlockValuesRefs | None]: + """ + Ensures new columns (which go into the BlockManager as new blocks) are + always copied (or a reference is being tracked to them under CoW) + and converted into an array. + + Parameters + ---------- + value : scalar, Series, or array-like + + Returns + ------- + tuple of numpy.ndarray or ExtensionArray and optional BlockValuesRefs + """ + self._ensure_valid_index(value) + + # Using a DataFrame would mean coercing values to one dtype + assert not isinstance(value, DataFrame) + if is_dict_like(value): + if not isinstance(value, Series): + value = Series(value) + return _reindex_for_setitem(value, self.index) + + if is_list_like(value): + com.require_length_match(value, self.index) + arr = sanitize_array(value, self.index, copy=True, allow_2d=True) + if ( + isinstance(value, Index) + and value.dtype == "object" + and arr.dtype != value.dtype + ): # + # TODO: Remove kludge in sanitize_array for string mode when enforcing + # this deprecation + warnings.warn( + "Setting an Index with object dtype into a DataFrame will stop " + "inferring another dtype in a future version. Cast the Index " + "explicitly before setting it into the DataFrame.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return arr, None + + @property + def _series(self): + return {item: self._ixs(idx, axis=1) for idx, item in enumerate(self.columns)} + + # ---------------------------------------------------------------------- + # Reindexing and alignment + + def _reindex_multi( + self, axes: dict[str, Index], copy: bool, fill_value + ) -> DataFrame: + """ + We are guaranteed non-Nones in the axes. + """ + + new_index, row_indexer = self.index.reindex(axes["index"]) + new_columns, col_indexer = self.columns.reindex(axes["columns"]) + + if row_indexer is not None and col_indexer is not None: + # Fastpath. 
By doing two 'take's at once we avoid making an + # unnecessary copy. + # We only get here with `self._can_fast_transpose`, which (almost) + # ensures that self.values is cheap. It may be worth making this + # condition more specific. + indexer = row_indexer, col_indexer + new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) + return self._constructor( + new_values, index=new_index, columns=new_columns, copy=False + ) + else: + return self._reindex_with_indexers( + {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, + copy=copy, + fill_value=fill_value, + ) + + @Appender( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + + Change the row labels. + + >>> df.set_axis(['a', 'b', 'c'], axis='index') + A B + a 1 4 + b 2 5 + c 3 6 + + Change the column labels. + + >>> df.set_axis(['I', 'II'], axis='columns') + I II + 0 1 4 + 1 2 5 + 2 3 6 + """ + ) + @Substitution( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + extended_summary_sub=" column or", + axis_description_sub=", and 1 identifies the columns", + see_also_sub=" or columns", + ) + @Appender(NDFrame.set_axis.__doc__) + def set_axis( + self, + labels, + *, + axis: Axis = 0, + copy: bool | None = None, + ) -> DataFrame: + return super().set_axis(labels, axis=axis, copy=copy) + + @doc( + NDFrame.reindex, + klass=_shared_doc_kwargs["klass"], + optional_reindex=_shared_doc_kwargs["optional_reindex"], + ) + def reindex( + self, + labels=None, + *, + index=None, + columns=None, + axis: Axis | None = None, + method: ReindexMethod | None = None, + copy: bool | None = None, + level: Level | None = None, + fill_value: Scalar | None = np.nan, + limit: int | None = None, + tolerance=None, + ) -> DataFrame: + return super().reindex( + labels=labels, + index=index, + columns=columns, + axis=axis, + method=method, + copy=copy, + level=level, + fill_value=fill_value, + limit=limit, + tolerance=tolerance, + ) + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level = ..., + inplace: Literal[True], + errors: IgnoreRaise = ..., + ) -> None: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level = ..., + inplace: Literal[False] = ..., + errors: IgnoreRaise = ..., + ) -> DataFrame: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level = ..., + inplace: bool = ..., + errors: IgnoreRaise = ..., + ) -> DataFrame | None: + ... + + def drop( + self, + labels: IndexLabel | None = None, + *, + axis: Axis = 0, + index: IndexLabel | None = None, + columns: IndexLabel | None = None, + level: Level | None = None, + inplace: bool = False, + errors: IgnoreRaise = "raise", + ) -> DataFrame | None: + """ + Drop specified labels from rows or columns. + + Remove rows or columns by specifying label names and corresponding + axis, or by directly specifying index or column names. When using a + multi-index, labels on different levels can be removed by specifying + the level. See the :ref:`user guide ` + for more information about the now unused levels. + + Parameters + ---------- + labels : single label or list-like + Index or column labels to drop. A tuple will be used as a single + label and not treated as a list-like. 
+ axis : {0 or 'index', 1 or 'columns'}, default 0 + Whether to drop labels from the index (0 or 'index') or + columns (1 or 'columns'). + index : single label or list-like + Alternative to specifying axis (``labels, axis=0`` + is equivalent to ``index=labels``). + columns : single label or list-like + Alternative to specifying axis (``labels, axis=1`` + is equivalent to ``columns=labels``). + level : int or level name, optional + For MultiIndex, level from which the labels will be removed. + inplace : bool, default False + If False, return a copy. Otherwise, do operation + in place and return None. + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and only existing labels are + dropped. + + Returns + ------- + DataFrame or None + Returns DataFrame or None DataFrame with the specified + index or column labels removed or None if inplace=True. + + Raises + ------ + KeyError + If any of the labels is not found in the selected axis. + + See Also + -------- + DataFrame.loc : Label-location based indexer for selection by label. + DataFrame.dropna : Return DataFrame with labels on given axis omitted + where (all or any) data are missing. + DataFrame.drop_duplicates : Return DataFrame with duplicate rows + removed, optionally only considering certain columns. + Series.drop : Return Series with specified index labels removed. + + Examples + -------- + >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), + ... columns=['A', 'B', 'C', 'D']) + >>> df + A B C D + 0 0 1 2 3 + 1 4 5 6 7 + 2 8 9 10 11 + + Drop columns + + >>> df.drop(['B', 'C'], axis=1) + A D + 0 0 3 + 1 4 7 + 2 8 11 + + >>> df.drop(columns=['B', 'C']) + A D + 0 0 3 + 1 4 7 + 2 8 11 + + Drop a row by index + + >>> df.drop([0, 1]) + A B C D + 2 8 9 10 11 + + Drop columns and/or rows of MultiIndex DataFrame + + >>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'], + ... ['speed', 'weight', 'length']], + ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], + ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) + >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], + ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], + ... [250, 150], [1.5, 0.8], [320, 250], + ... [1, 0.8], [0.3, 0.2]]) + >>> df + big small + llama speed 45.0 30.0 + weight 200.0 100.0 + length 1.5 1.0 + cow speed 30.0 20.0 + weight 250.0 150.0 + length 1.5 0.8 + falcon speed 320.0 250.0 + weight 1.0 0.8 + length 0.3 0.2 + + Drop a specific index combination from the MultiIndex + DataFrame, i.e., drop the combination ``'falcon'`` and + ``'weight'``, which deletes only the corresponding row + + >>> df.drop(index=('falcon', 'weight')) + big small + llama speed 45.0 30.0 + weight 200.0 100.0 + length 1.5 1.0 + cow speed 30.0 20.0 + weight 250.0 150.0 + length 1.5 0.8 + falcon speed 320.0 250.0 + length 0.3 0.2 + + >>> df.drop(index='cow', columns='small') + big + llama speed 45.0 + weight 200.0 + length 1.5 + falcon speed 320.0 + weight 1.0 + length 0.3 + + >>> df.drop(index='length', level=1) + big small + llama speed 45.0 30.0 + weight 200.0 100.0 + cow speed 30.0 20.0 + weight 250.0 150.0 + falcon speed 320.0 250.0 + weight 1.0 0.8 + """ + return super().drop( + labels=labels, + axis=axis, + index=index, + columns=columns, + level=level, + inplace=inplace, + errors=errors, + ) + + @overload + def rename( + self, + mapper: Renamer | None = ..., + *, + index: Renamer | None = ..., + columns: Renamer | None = ..., + axis: Axis | None = ..., + copy: bool | None = ..., + inplace: Literal[True], + level: Level = ..., + errors: IgnoreRaise = ..., + ) -> None: + ... 
+ + @overload + def rename( + self, + mapper: Renamer | None = ..., + *, + index: Renamer | None = ..., + columns: Renamer | None = ..., + axis: Axis | None = ..., + copy: bool | None = ..., + inplace: Literal[False] = ..., + level: Level = ..., + errors: IgnoreRaise = ..., + ) -> DataFrame: + ... + + @overload + def rename( + self, + mapper: Renamer | None = ..., + *, + index: Renamer | None = ..., + columns: Renamer | None = ..., + axis: Axis | None = ..., + copy: bool | None = ..., + inplace: bool = ..., + level: Level = ..., + errors: IgnoreRaise = ..., + ) -> DataFrame | None: + ... + + def rename( + self, + mapper: Renamer | None = None, + *, + index: Renamer | None = None, + columns: Renamer | None = None, + axis: Axis | None = None, + copy: bool | None = None, + inplace: bool = False, + level: Level | None = None, + errors: IgnoreRaise = "ignore", + ) -> DataFrame | None: + """ + Rename columns or index labels. + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Extra labels listed don't throw an + error. + + See the :ref:`user guide <basics.rename>` for more. + + Parameters + ---------- + mapper : dict-like or function + Dict-like or function transformations to apply to + that axis' values. Use either ``mapper`` and ``axis`` to + specify the axis to target with ``mapper``, or ``index`` and + ``columns``. + index : dict-like or function + Alternative to specifying axis (``mapper, axis=0`` + is equivalent to ``index=mapper``). + columns : dict-like or function + Alternative to specifying axis (``mapper, axis=1`` + is equivalent to ``columns=mapper``). + axis : {0 or 'index', 1 or 'columns'}, default 0 + Axis to target with ``mapper``. Can be either the axis name + ('index', 'columns') or number (0, 1). The default is 'index'. + copy : bool, default True + Also copy underlying data. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + If True then value of copy is ignored. + level : int or level name, default None + In case of a MultiIndex, only rename labels in the specified + level. + errors : {'ignore', 'raise'}, default 'ignore' + If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, + or `columns` contains labels that are not present in the Index + being transformed. + If 'ignore', existing keys will be renamed and extra keys will be + ignored. + + Returns + ------- + DataFrame or None + DataFrame with the renamed axis labels or None if ``inplace=True``. + + Raises + ------ + KeyError + If any of the labels is not found in the selected axis and + "errors='raise'". + + See Also + -------- + DataFrame.rename_axis : Set the name of the axis. + + Examples + -------- + ``DataFrame.rename`` supports two calling conventions + + * ``(index=index_mapper, columns=columns_mapper, ...)`` + * ``(mapper, axis={'index', 'columns'}, ...)`` + + We *highly* recommend using keyword arguments to clarify your + intent.
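+ + As a small sketch of that equivalence (assuming a frame ``df`` with a + column ``"A"``), both conventions produce the same result: + + >>> df = pd.DataFrame({"A": [1, 2]}) + >>> r1 = df.rename(columns={"A": "a"}) + >>> r2 = df.rename({"A": "a"}, axis="columns") + >>> r1.equals(r2) + True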
+ + Rename columns using a mapping: + + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + >>> df.rename(columns={"A": "a", "B": "c"}) + a c + 0 1 4 + 1 2 5 + 2 3 6 + + Rename index using a mapping: + + >>> df.rename(index={0: "x", 1: "y", 2: "z"}) + A B + x 1 4 + y 2 5 + z 3 6 + + Cast index labels to a different type: + + >>> df.index + RangeIndex(start=0, stop=3, step=1) + >>> df.rename(index=str).index + Index(['0', '1', '2'], dtype='object') + + >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") + Traceback (most recent call last): + KeyError: ['C'] not found in axis + + Using axis-style parameters: + + >>> df.rename(str.lower, axis='columns') + a b + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename({1: 2, 2: 4}, axis='index') + A B + 0 1 4 + 2 2 5 + 4 3 6 + """ + return super()._rename( + mapper=mapper, + index=index, + columns=columns, + axis=axis, + copy=copy, + inplace=inplace, + level=level, + errors=errors, + ) + + def pop(self, item: Hashable) -> Series: + """ + Return item and drop from frame. Raise KeyError if not found. + + Parameters + ---------- + item : label + Label of column to be popped. + + Returns + ------- + Series + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan)], + ... columns=('name', 'class', 'max_speed')) + >>> df + name class max_speed + 0 falcon bird 389.0 + 1 parrot bird 24.0 + 2 lion mammal 80.5 + 3 monkey mammal NaN + + >>> df.pop('class') + 0 bird + 1 bird + 2 mammal + 3 mammal + Name: class, dtype: object + + >>> df + name max_speed + 0 falcon 389.0 + 1 parrot 24.0 + 2 lion 80.5 + 3 monkey NaN + """ + return super().pop(item=item) + + def _replace_columnwise( + self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex + ): + """ + Dispatch to Series.replace column-wise. + + Parameters + ---------- + mapping : dict + of the form {col: (target, value)} + inplace : bool + regex : bool or same types as `to_replace` in DataFrame.replace + + Returns + ------- + DataFrame or None + """ + # Operate column-wise + res = self if inplace else self.copy(deep=None) + ax = self.columns + + for i, ax_value in enumerate(ax): + if ax_value in mapping: + ser = self.iloc[:, i] + + target, value = mapping[ax_value] + newobj = ser.replace(target, value, regex=regex) + + res._iset_item(i, newobj, inplace=inplace) + + if inplace: + return + return res.__finalize__(self) + + @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) + def shift( + self, + periods: int | Sequence[int] = 1, + freq: Frequency | None = None, + axis: Axis = 0, + fill_value: Hashable = lib.no_default, + suffix: str | None = None, + ) -> DataFrame: + if freq is not None and fill_value is not lib.no_default: + # GH#53832 + warnings.warn( + "Passing a 'freq' together with a 'fill_value' silently ignores " + "the fill_value and is deprecated. This will raise in a future " + "version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + fill_value = lib.no_default + + if self.empty: + return self.copy() + + axis = self._get_axis_number(axis) + + if is_list_like(periods): + periods = cast(Sequence, periods) + if axis == 1: + raise ValueError( + "If `periods` contains multiple shifts, `axis` cannot be 1." 
+ ) + if len(periods) == 0: + raise ValueError("If `periods` is an iterable, it cannot be empty.") + from pandas.core.reshape.concat import concat + + shifted_dataframes = [] + for period in periods: + if not is_integer(period): + raise TypeError( + f"Periods must be integer, but {period} is {type(period)}." + ) + period = cast(int, period) + shifted_dataframes.append( + super() + .shift(periods=period, freq=freq, axis=axis, fill_value=fill_value) + .add_suffix(f"{suffix}_{period}" if suffix else f"_{period}") + ) + return concat(shifted_dataframes, axis=1) + elif suffix: + raise ValueError("Cannot specify `suffix` if `periods` is an int.") + periods = cast(int, periods) + + ncols = len(self.columns) + arrays = self._mgr.arrays + if axis == 1 and periods != 0 and ncols > 0 and freq is None: + if fill_value is lib.no_default: + # We will infer fill_value to match the closest column + + # Use a column that we know is valid for our column's dtype GH#38434 + label = self.columns[0] + + if periods > 0: + result = self.iloc[:, :-periods] + for col in range(min(ncols, abs(periods))): + # TODO(EA2D): doing this in a loop unnecessary with 2D EAs + # Define filler inside loop so we get a copy + filler = self.iloc[:, 0].shift(len(self)) + result.insert(0, label, filler, allow_duplicates=True) + else: + result = self.iloc[:, -periods:] + for col in range(min(ncols, abs(periods))): + # Define filler inside loop so we get a copy + filler = self.iloc[:, -1].shift(len(self)) + result.insert( + len(result.columns), label, filler, allow_duplicates=True + ) + + result.columns = self.columns.copy() + return result + elif len(arrays) > 1 or ( + # If we only have one block and we know that we can't + # keep the same dtype (i.e. the _can_hold_element check) + # then we can go through the reindex_indexer path + # (and avoid casting logic in the Block method). + not can_hold_element(arrays[0], fill_value) + ): + # GH#35488 we need to watch out for multi-block cases + # We only get here with fill_value not-lib.no_default + nper = abs(periods) + nper = min(nper, ncols) + if periods > 0: + indexer = np.array( + [-1] * nper + list(range(ncols - periods)), dtype=np.intp + ) + else: + indexer = np.array( + list(range(nper, ncols)) + [-1] * nper, dtype=np.intp + ) + mgr = self._mgr.reindex_indexer( + self.columns, + indexer, + axis=0, + fill_value=fill_value, + allow_dups=True, + ) + res_df = self._constructor_from_mgr(mgr, axes=mgr.axes) + return res_df.__finalize__(self, method="shift") + else: + return self.T.shift(periods=periods, fill_value=fill_value).T + + return super().shift( + periods=periods, freq=freq, axis=axis, fill_value=fill_value + ) + + @overload + def set_index( + self, + keys, + *, + drop: bool = ..., + append: bool = ..., + inplace: Literal[False] = ..., + verify_integrity: bool = ..., + ) -> DataFrame: + ... + + @overload + def set_index( + self, + keys, + *, + drop: bool = ..., + append: bool = ..., + inplace: Literal[True], + verify_integrity: bool = ..., + ) -> None: + ... + + def set_index( + self, + keys, + *, + drop: bool = True, + append: bool = False, + inplace: bool = False, + verify_integrity: bool = False, + ) -> DataFrame | None: + """ + Set the DataFrame index using existing columns. + + Set the DataFrame index (row labels) using one or more existing + columns or arrays (of the correct length). The index can replace the + existing index or expand on it. 
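+ + For example, a minimal sketch of expanding rather than replacing the + index (``append=True``; ``df`` is a hypothetical frame with a + ``'month'`` column): + + >>> df = pd.DataFrame({"month": [1, 4], "sale": [55, 40]}) + >>> df.set_index("month", append=True).index.nlevels + 2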
+ + Parameters + ---------- + keys : label or array-like or list of labels/arrays + This parameter can be either a single column key, a single array of + the same length as the calling DataFrame, or a list containing an + arbitrary combination of column keys and arrays. Here, "array" + encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and + instances of :class:`~collections.abc.Iterator`. + drop : bool, default True + Delete columns to be used as the new index. + append : bool, default False + Whether to append columns to existing index. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + verify_integrity : bool, default False + Check the new index for duplicates. Otherwise defer the check until + necessary. Setting to False will improve the performance of this + method. + + Returns + ------- + DataFrame or None + Changed row labels or None if ``inplace=True``. + + See Also + -------- + DataFrame.reset_index : Opposite of set_index. + DataFrame.reindex : Change to new indices or expand indices. + DataFrame.reindex_like : Change to same indices as other DataFrame. + + Examples + -------- + >>> df = pd.DataFrame({'month': [1, 4, 7, 10], + ... 'year': [2012, 2014, 2013, 2014], + ... 'sale': [55, 40, 84, 31]}) + >>> df + month year sale + 0 1 2012 55 + 1 4 2014 40 + 2 7 2013 84 + 3 10 2014 31 + + Set the index to become the 'month' column: + + >>> df.set_index('month') + year sale + month + 1 2012 55 + 4 2014 40 + 7 2013 84 + 10 2014 31 + + Create a MultiIndex using columns 'year' and 'month': + + >>> df.set_index(['year', 'month']) + sale + year month + 2012 1 55 + 2014 4 40 + 2013 7 84 + 2014 10 31 + + Create a MultiIndex using an Index and a column: + + >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) + month sale + year + 1 2012 1 55 + 2 2014 4 40 + 3 2013 7 84 + 4 2014 10 31 + + Create a MultiIndex using two Series: + + >>> s = pd.Series([1, 2, 3, 4]) + >>> df.set_index([s, s**2]) + month year sale + 1 1 1 2012 55 + 2 4 4 2014 40 + 3 9 7 2013 84 + 4 16 10 2014 31 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + self._check_inplace_and_allows_duplicate_labels(inplace) + if not isinstance(keys, list): + keys = [keys] + + err_msg = ( + 'The parameter "keys" may be a column key, one-dimensional ' + "array, or a list containing only valid column keys and " + "one-dimensional arrays." + ) + + missing: list[Hashable] = [] + for col in keys: + if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)): + # arrays are fine as long as they are one-dimensional + # iterators get converted to list below + if getattr(col, "ndim", 1) != 1: + raise ValueError(err_msg) + else: + # everything else gets tried as a key; see GH 24969 + try: + found = col in self.columns + except TypeError as err: + raise TypeError( + f"{err_msg}. 
Received column of type {type(col)}" + ) from err + else: + if not found: + missing.append(col) + + if missing: + raise KeyError(f"None of {missing} are in the columns") + + if inplace: + frame = self + else: + # GH 49473 Use "lazy copy" with Copy-on-Write + frame = self.copy(deep=None) + + arrays: list[Index] = [] + names: list[Hashable] = [] + if append: + names = list(self.index.names) + if isinstance(self.index, MultiIndex): + arrays.extend( + self.index._get_level_values(i) for i in range(self.index.nlevels) + ) + else: + arrays.append(self.index) + + to_remove: list[Hashable] = [] + for col in keys: + if isinstance(col, MultiIndex): + arrays.extend(col._get_level_values(n) for n in range(col.nlevels)) + names.extend(col.names) + elif isinstance(col, (Index, Series)): + # if Index then not MultiIndex (treated above) + + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[Index, Series]"; expected "Index" + arrays.append(col) # type: ignore[arg-type] + names.append(col.name) + elif isinstance(col, (list, np.ndarray)): + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[List[Any], ndarray]"; expected "Index" + arrays.append(col) # type: ignore[arg-type] + names.append(None) + elif isinstance(col, abc.Iterator): + # error: Argument 1 to "append" of "list" has incompatible type + # "List[Any]"; expected "Index" + arrays.append(list(col)) # type: ignore[arg-type] + names.append(None) + # from here, col can only be a column label + else: + arrays.append(frame[col]) + names.append(col) + if drop: + to_remove.append(col) + + if len(arrays[-1]) != len(self): + # check newest element against length of calling frame, since + # ensure_index_from_sequences would not raise for append=False. + raise ValueError( + f"Length mismatch: Expected {len(self)} rows, " + f"received array of length {len(arrays[-1])}" + ) + + index = ensure_index_from_sequences(arrays, names) + + if verify_integrity and not index.is_unique: + duplicates = index[index.duplicated()].unique() + raise ValueError(f"Index has duplicate keys: {duplicates}") + + # use set to handle duplicate column names gracefully in case of drop + for c in set(to_remove): + del frame[c] + + # clear up memory usage + index._cleanup() + + frame.index = index + + if not inplace: + return frame + return None + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + inplace: Literal[False] = ..., + col_level: Hashable = ..., + col_fill: Hashable = ..., + allow_duplicates: bool | lib.NoDefault = ..., + names: Hashable | Sequence[Hashable] | None = None, + ) -> DataFrame: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + inplace: Literal[True], + col_level: Hashable = ..., + col_fill: Hashable = ..., + allow_duplicates: bool | lib.NoDefault = ..., + names: Hashable | Sequence[Hashable] | None = None, + ) -> None: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + inplace: bool = ..., + col_level: Hashable = ..., + col_fill: Hashable = ..., + allow_duplicates: bool | lib.NoDefault = ..., + names: Hashable | Sequence[Hashable] | None = None, + ) -> DataFrame | None: + ... 
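+ + # The overloads above follow the usual typing pattern for ``inplace`` in this + # module: ``Literal[True]`` maps to ``None``, ``Literal[False]`` to a new + # ``DataFrame``, and plain ``bool`` to the union of both. A minimal, + # self-contained sketch of the same pattern (illustrative only; ``halve`` is a + # hypothetical function, not part of this module): + # + # from typing import Literal, overload + # + # @overload + # def halve(xs: list[int], *, inplace: Literal[True]) -> None: ... + # @overload + # def halve(xs: list[int], *, inplace: Literal[False] = ...) -> list[int]: ... + # def halve(xs: list[int], *, inplace: bool = False) -> list[int] | None: + # if inplace: + # xs[:] = [x // 2 for x in xs] + # return None + # return [x // 2 for x in xs]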
+ + def reset_index( + self, + level: IndexLabel | None = None, + *, + drop: bool = False, + inplace: bool = False, + col_level: Hashable = 0, + col_fill: Hashable = "", + allow_duplicates: bool | lib.NoDefault = lib.no_default, + names: Hashable | Sequence[Hashable] | None = None, + ) -> DataFrame | None: + """ + Reset the index, or a level of it. + + Reset the index of the DataFrame, and use the default one instead. + If the DataFrame has a MultiIndex, this method can remove one or more + levels. + + Parameters + ---------- + level : int, str, tuple, or list, default None + Only remove the given levels from the index. Removes all levels by + default. + drop : bool, default False + Do not try to insert index into dataframe columns. This resets + the index to the default integer index. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + col_level : int or str, default 0 + If the columns have multiple levels, determines which level the + labels are inserted into. By default it is inserted into the first + level. + col_fill : object, default '' + If the columns have multiple levels, determines how the other + levels are named. If None then the index name is repeated. + allow_duplicates : bool, optional, default lib.no_default + Allow duplicate column labels to be created. + + .. versionadded:: 1.5.0 + + names : int, str or 1-dimensional list, default None + Using the given string, rename the DataFrame column which contains the + index data. If the DataFrame has a MultiIndex, this has to be a list or + tuple with length equal to the number of levels. + + .. versionadded:: 1.5.0 + + Returns + ------- + DataFrame or None + DataFrame with the new index or None if ``inplace=True``. + + See Also + -------- + DataFrame.set_index : Opposite of reset_index. + DataFrame.reindex : Change to new indices or expand indices. + DataFrame.reindex_like : Change to same indices as other DataFrame. + + Examples + -------- + >>> df = pd.DataFrame([('bird', 389.0), + ... ('bird', 24.0), + ... ('mammal', 80.5), + ... ('mammal', np.nan)], + ... index=['falcon', 'parrot', 'lion', 'monkey'], + ... columns=('class', 'max_speed')) + >>> df + class max_speed + falcon bird 389.0 + parrot bird 24.0 + lion mammal 80.5 + monkey mammal NaN + + When we reset the index, the old index is added as a column, and a + new sequential index is used: + + >>> df.reset_index() + index class max_speed + 0 falcon bird 389.0 + 1 parrot bird 24.0 + 2 lion mammal 80.5 + 3 monkey mammal NaN + + We can use the `drop` parameter to avoid the old index being added as + a column: + + >>> df.reset_index(drop=True) + class max_speed + 0 bird 389.0 + 1 bird 24.0 + 2 mammal 80.5 + 3 mammal NaN + + You can also use `reset_index` with `MultiIndex`. + + >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), + ... ('bird', 'parrot'), + ... ('mammal', 'lion'), + ... ('mammal', 'monkey')], + ... names=['class', 'name']) + >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), + ... ('species', 'type')]) + >>> df = pd.DataFrame([(389.0, 'fly'), + ... (24.0, 'fly'), + ... (80.5, 'run'), + ... (np.nan, 'jump')], + ... index=index, + ... 
columns=columns) + >>> df + speed species + max type + class name + bird falcon 389.0 fly + parrot 24.0 fly + mammal lion 80.5 run + monkey NaN jump + + Using the `names` parameter, choose a name for the index column: + + >>> df.reset_index(names=['classes', 'names']) + classes names speed species + max type + 0 bird falcon 389.0 fly + 1 bird parrot 24.0 fly + 2 mammal lion 80.5 run + 3 mammal monkey NaN jump + + If the index has multiple levels, we can reset a subset of them: + + >>> df.reset_index(level='class') + class speed species + max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + + If we are not dropping the index, by default, it is placed in the top + level. We can place it in another level: + + >>> df.reset_index(level='class', col_level=1) + speed species + class max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + + When the index is inserted under another level, we can specify under + which one with the parameter `col_fill`: + + >>> df.reset_index(level='class', col_level=1, col_fill='species') + species speed species + class max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + + If we specify a nonexistent level for `col_fill`, it is created: + + >>> df.reset_index(level='class', col_level=1, col_fill='genus') + genus speed species + class max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + """ + inplace = validate_bool_kwarg(inplace, "inplace") + self._check_inplace_and_allows_duplicate_labels(inplace) + if inplace: + new_obj = self + else: + new_obj = self.copy(deep=None) + if allow_duplicates is not lib.no_default: + allow_duplicates = validate_bool_kwarg(allow_duplicates, "allow_duplicates") + + new_index = default_index(len(new_obj)) + if level is not None: + if not isinstance(level, (tuple, list)): + level = [level] + level = [self.index._get_level_number(lev) for lev in level] + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) + + if not drop: + to_insert: Iterable[tuple[Any, Any | None]] + + default = "index" if "index" not in self else "level_0" + names = self.index._get_default_index_names(names, default) + + if isinstance(self.index, MultiIndex): + to_insert = zip(self.index.levels, self.index.codes) + else: + to_insert = ((self.index, None),) + + multi_col = isinstance(self.columns, MultiIndex) + for i, (lev, lab) in reversed(list(enumerate(to_insert))): + if level is not None and i not in level: + continue + name = names[i] + if multi_col: + col_name = list(name) if isinstance(name, tuple) else [name] + if col_fill is None: + if len(col_name) not in (1, self.columns.nlevels): + raise ValueError( + "col_fill=None is incompatible " + f"with incomplete column name {name}" + ) + col_fill = col_name[0] + + lev_num = self.columns._get_level_number(col_level) + name_lst = [col_fill] * lev_num + col_name + missing = self.columns.nlevels - len(name_lst) + name_lst += [col_fill] * missing + name = tuple(name_lst) + + # to ndarray and maybe infer different dtype + level_values = lev._values + if level_values.dtype == np.object_: + level_values = lib.maybe_convert_objects(level_values) + + if lab is not None: + # if we have the codes, extract the values with a mask + level_values = algorithms.take( + level_values, lab, allow_fill=True, fill_value=lev._na_value + ) + + new_obj.insert( + 0, + name, + 
level_values, + allow_duplicates=allow_duplicates, + ) + + new_obj.index = new_index + if not inplace: + return new_obj + + return None + + # ---------------------------------------------------------------------- + # Reindex-based selection methods + + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) + def isna(self) -> DataFrame: + res_mgr = self._mgr.isna(func=isna) + result = self._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + return result.__finalize__(self, method="isna") + + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) + def isnull(self) -> DataFrame: + """ + DataFrame.isnull is an alias for DataFrame.isna. + """ + return self.isna() + + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) + def notna(self) -> DataFrame: + return ~self.isna() + + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) + def notnull(self) -> DataFrame: + """ + DataFrame.notnull is an alias for DataFrame.notna. + """ + return ~self.isna() + + @overload + def dropna( + self, + *, + axis: Axis = ..., + how: AnyAll | lib.NoDefault = ..., + thresh: int | lib.NoDefault = ..., + subset: IndexLabel = ..., + inplace: Literal[False] = ..., + ignore_index: bool = ..., + ) -> DataFrame: + ... + + @overload + def dropna( + self, + *, + axis: Axis = ..., + how: AnyAll | lib.NoDefault = ..., + thresh: int | lib.NoDefault = ..., + subset: IndexLabel = ..., + inplace: Literal[True], + ignore_index: bool = ..., + ) -> None: + ... + + def dropna( + self, + *, + axis: Axis = 0, + how: AnyAll | lib.NoDefault = lib.no_default, + thresh: int | lib.NoDefault = lib.no_default, + subset: IndexLabel | None = None, + inplace: bool = False, + ignore_index: bool = False, + ) -> DataFrame | None: + """ + Remove missing values. + + See the :ref:`User Guide <missing_data>` for more on which values are + considered missing, and how to work with missing data. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + Determine if rows or columns which contain missing values are + removed. + + * 0, or 'index' : Drop rows which contain missing values. + * 1, or 'columns' : Drop columns which contain missing value. + + Only a single axis is allowed. + + how : {'any', 'all'}, default 'any' + Determine if row or column is removed from DataFrame, when we have + at least one NA or all NA. + + * 'any' : If any NA values are present, drop that row or column. + * 'all' : If all values are NA, drop that row or column. + + thresh : int, optional + Require that many non-NA values. Cannot be combined with how. + subset : column label or sequence of labels, optional + Labels along other axis to consider, e.g. if you are dropping rows + these would be a list of columns to include. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + .. versionadded:: 2.0.0 + + Returns + ------- + DataFrame or None + DataFrame with NA entries dropped from it or None if ``inplace=True``. + + See Also + -------- + DataFrame.isna: Indicate missing values. + DataFrame.notna : Indicate existing (non-missing) values. + DataFrame.fillna : Replace missing values. + Series.dropna : Drop missing values. + Index.dropna : Drop missing indices. + + Examples + -------- + >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], + ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], + ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), + ...
pd.NaT]}) + >>> df + name toy born + 0 Alfred NaN NaT + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + + Drop the rows where at least one element is missing. + + >>> df.dropna() + name toy born + 1 Batman Batmobile 1940-04-25 + + Drop the columns where at least one element is missing. + + >>> df.dropna(axis='columns') + name + 0 Alfred + 1 Batman + 2 Catwoman + + Drop the rows where all elements are missing. + + >>> df.dropna(how='all') + name toy born + 0 Alfred NaN NaT + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + + Keep only the rows with at least 2 non-NA values. + + >>> df.dropna(thresh=2) + name toy born + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + + Define in which columns to look for missing values. + + >>> df.dropna(subset=['name', 'toy']) + name toy born + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + """ + if (how is not lib.no_default) and (thresh is not lib.no_default): + raise TypeError( + "You cannot set both the how and thresh arguments at the same time." + ) + + if how is lib.no_default: + how = "any" + + inplace = validate_bool_kwarg(inplace, "inplace") + if isinstance(axis, (tuple, list)): + # GH20987 + raise TypeError("supplying multiple axes to axis is no longer supported.") + + axis = self._get_axis_number(axis) + agg_axis = 1 - axis + + agg_obj = self + if subset is not None: + # subset needs to be list + if not is_list_like(subset): + subset = [subset] + ax = self._get_axis(agg_axis) + indices = ax.get_indexer_for(subset) + check = indices == -1 + if check.any(): + raise KeyError(np.array(subset)[check].tolist()) + agg_obj = self.take(indices, axis=agg_axis) + + if thresh is not lib.no_default: + count = agg_obj.count(axis=agg_axis) + mask = count >= thresh + elif how == "any": + # faster equivalent to 'agg_obj.count(agg_axis) == self.shape[agg_axis]' + mask = notna(agg_obj).all(axis=agg_axis, bool_only=False) + elif how == "all": + # faster equivalent to 'agg_obj.count(agg_axis) > 0' + mask = notna(agg_obj).any(axis=agg_axis, bool_only=False) + else: + raise ValueError(f"invalid how option: {how}") + + if np.all(mask): + result = self.copy(deep=None) + else: + result = self.loc(axis=axis)[mask] + + if ignore_index: + result.index = default_index(len(result)) + + if not inplace: + return result + self._update_inplace(result) + return None + + @overload + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = ..., + *, + keep: DropKeep = ..., + inplace: Literal[True], + ignore_index: bool = ..., + ) -> None: + ... + + @overload + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = ..., + *, + keep: DropKeep = ..., + inplace: Literal[False] = ..., + ignore_index: bool = ..., + ) -> DataFrame: + ... + + @overload + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = ..., + *, + keep: DropKeep = ..., + inplace: bool = ..., + ignore_index: bool = ..., + ) -> DataFrame | None: + ... + + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = None, + *, + keep: DropKeep = "first", + inplace: bool = False, + ignore_index: bool = False, + ) -> DataFrame | None: + """ + Return DataFrame with duplicate rows removed. + + Considering certain columns is optional. Indexes, including time indexes + are ignored. + + Parameters + ---------- + subset : column label or sequence of labels, optional + Only consider certain columns for identifying duplicates, by + default use all of the columns. 
+ keep : {'first', 'last', ``False``}, default 'first' + Determines which duplicates (if any) to keep. + + - 'first' : Drop duplicates except for the first occurrence. + - 'last' : Drop duplicates except for the last occurrence. + - ``False`` : Drop all duplicates. + + inplace : bool, default ``False`` + Whether to modify the DataFrame rather than creating a new one. + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + Returns + ------- + DataFrame or None + DataFrame with duplicates removed or None if ``inplace=True``. + + See Also + -------- + DataFrame.value_counts: Count unique combinations of columns. + + Examples + -------- + Consider dataset containing ramen rating. + + >>> df = pd.DataFrame({ + ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], + ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], + ... 'rating': [4, 4, 3.5, 15, 5] + ... }) + >>> df + brand style rating + 0 Yum Yum cup 4.0 + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + By default, it removes duplicate rows based on all columns. + + >>> df.drop_duplicates() + brand style rating + 0 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + To remove duplicates on specific column(s), use ``subset``. + + >>> df.drop_duplicates(subset=['brand']) + brand style rating + 0 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + + To remove duplicates and keep last occurrences, use ``keep``. + + >>> df.drop_duplicates(subset=['brand', 'style'], keep='last') + brand style rating + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 4 Indomie pack 5.0 + """ + if self.empty: + return self.copy(deep=None) + + inplace = validate_bool_kwarg(inplace, "inplace") + ignore_index = validate_bool_kwarg(ignore_index, "ignore_index") + + result = self[-self.duplicated(subset, keep=keep)] + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + self._update_inplace(result) + return None + else: + return result + + def duplicated( + self, + subset: Hashable | Sequence[Hashable] | None = None, + keep: DropKeep = "first", + ) -> Series: + """ + Return boolean Series denoting duplicate rows. + + Considering certain columns is optional. + + Parameters + ---------- + subset : column label or sequence of labels, optional + Only consider certain columns for identifying duplicates, by + default use all of the columns. + keep : {'first', 'last', False}, default 'first' + Determines which duplicates (if any) to mark. + + - ``first`` : Mark duplicates as ``True`` except for the first occurrence. + - ``last`` : Mark duplicates as ``True`` except for the last occurrence. + - False : Mark all duplicates as ``True``. + + Returns + ------- + Series + Boolean series for each duplicated rows. + + See Also + -------- + Index.duplicated : Equivalent method on index. + Series.duplicated : Equivalent method on Series. + Series.drop_duplicates : Remove duplicate values from Series. + DataFrame.drop_duplicates : Remove duplicate values from DataFrame. + + Examples + -------- + Consider dataset containing ramen rating. + + >>> df = pd.DataFrame({ + ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], + ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], + ... 'rating': [4, 4, 3.5, 15, 5] + ... 
}) + >>> df + brand style rating + 0 Yum Yum cup 4.0 + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + By default, for each set of duplicated values, the first occurrence + is set on False and all others on True. + + >>> df.duplicated() + 0 False + 1 True + 2 False + 3 False + 4 False + dtype: bool + + By using 'last', the last occurrence of each set of duplicated values + is set on False and all others on True. + + >>> df.duplicated(keep='last') + 0 True + 1 False + 2 False + 3 False + 4 False + dtype: bool + + By setting ``keep`` on False, all duplicates are True. + + >>> df.duplicated(keep=False) + 0 True + 1 True + 2 False + 3 False + 4 False + dtype: bool + + To find duplicates on specific column(s), use ``subset``. + + >>> df.duplicated(subset=['brand']) + 0 False + 1 True + 2 False + 3 True + 4 True + dtype: bool + """ + + if self.empty: + return self._constructor_sliced(dtype=bool) + + def f(vals) -> tuple[np.ndarray, int]: + labels, shape = algorithms.factorize(vals, size_hint=len(self)) + return labels.astype("i8", copy=False), len(shape) + + if subset is None: + # https://github.com/pandas-dev/pandas/issues/28770 + # Incompatible types in assignment (expression has type "Index", variable + # has type "Sequence[Any]") + subset = self.columns # type: ignore[assignment] + elif ( + not np.iterable(subset) + or isinstance(subset, str) + or isinstance(subset, tuple) + and subset in self.columns + ): + subset = (subset,) + + # needed for mypy since can't narrow types using np.iterable + subset = cast(Sequence, subset) + + # Verify all columns in subset exist in the queried dataframe + # Otherwise, raise a KeyError, same as if you try to __getitem__ with a + # key that doesn't exist. + diff = set(subset) - set(self.columns) + if diff: + raise KeyError(Index(diff)) + + if len(subset) == 1 and self.columns.is_unique: + # GH#45236 This is faster than get_group_index below + result = self[subset[0]].duplicated(keep) + result.name = None + else: + vals = (col.values for name, col in self.items() if name in subset) + labels, shape = map(list, zip(*map(f, vals))) + + ids = get_group_index(labels, tuple(shape), sort=False, xnull=False) + result = self._constructor_sliced(duplicated(ids, keep), index=self.index) + return result.__finalize__(self, method="duplicated") + + # ---------------------------------------------------------------------- + # Sorting + # error: Signature of "sort_values" incompatible with supertype "NDFrame" + @overload # type: ignore[override] + def sort_values( + self, + by: IndexLabel, + *, + axis: Axis = ..., + ascending=..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> DataFrame: + ... + + @overload + def sort_values( + self, + by: IndexLabel, + *, + axis: Axis = ..., + ascending=..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: str = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> None: + ... + + def sort_values( + self, + by: IndexLabel, + *, + axis: Axis = 0, + ascending: bool | list[bool] | tuple[bool, ...] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: str = "last", + ignore_index: bool = False, + key: ValueKeyFunc | None = None, + ) -> DataFrame | None: + """ + Sort by the values along either axis. + + Parameters + ---------- + by : str or list of str + Name or list of names to sort by. 
+ + - if `axis` is 0 or `'index'` then `by` may contain index + levels and/or column labels. + - if `axis` is 1 or `'columns'` then `by` may contain column + levels and/or index labels. + axis : "{0 or 'index', 1 or 'columns'}", default 0 + Axis to be sorted. + ascending : bool or list of bool, default True + Sort ascending vs. descending. Specify list for multiple sort + orders. If this is a list of bools, must match the length of + the by. + inplace : bool, default False + If True, perform operation in-place. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. `mergesort` and `stable` are the only stable algorithms. For + DataFrames, this option is only applied when sorting on a single + column or label. + na_position : {'first', 'last'}, default 'last' + Puts NaNs at the beginning if `first`; `last` puts NaNs at the + end. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + Apply the key function to the values + before sorting. This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect a + ``Series`` and return a Series with the same shape as the input. + It will be applied to each column in `by` independently. + + Returns + ------- + DataFrame or None + DataFrame with sorted values or None if ``inplace=True``. + + See Also + -------- + DataFrame.sort_index : Sort a DataFrame by the index. + Series.sort_values : Similar method for a Series. + + Examples + -------- + >>> df = pd.DataFrame({ + ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], + ... 'col2': [2, 1, 9, 8, 7, 4], + ... 'col3': [0, 1, 9, 4, 2, 3], + ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] + ... }) + >>> df + col1 col2 col3 col4 + 0 A 2 0 a + 1 A 1 1 B + 2 B 9 9 c + 3 NaN 8 4 D + 4 D 7 2 e + 5 C 4 3 F + + Sort by col1 + + >>> df.sort_values(by=['col1']) + col1 col2 col3 col4 + 0 A 2 0 a + 1 A 1 1 B + 2 B 9 9 c + 5 C 4 3 F + 4 D 7 2 e + 3 NaN 8 4 D + + Sort by multiple columns + + >>> df.sort_values(by=['col1', 'col2']) + col1 col2 col3 col4 + 1 A 1 1 B + 0 A 2 0 a + 2 B 9 9 c + 5 C 4 3 F + 4 D 7 2 e + 3 NaN 8 4 D + + Sort Descending + + >>> df.sort_values(by='col1', ascending=False) + col1 col2 col3 col4 + 4 D 7 2 e + 5 C 4 3 F + 2 B 9 9 c + 0 A 2 0 a + 1 A 1 1 B + 3 NaN 8 4 D + + Putting NAs first + + >>> df.sort_values(by='col1', ascending=False, na_position='first') + col1 col2 col3 col4 + 3 NaN 8 4 D + 4 D 7 2 e + 5 C 4 3 F + 2 B 9 9 c + 0 A 2 0 a + 1 A 1 1 B + + Sorting with a key function + + >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) + col1 col2 col3 col4 + 0 A 2 0 a + 1 A 1 1 B + 2 B 9 9 c + 3 NaN 8 4 D + 4 D 7 2 e + 5 C 4 3 F + + Natural sort with the key argument, + using the `natsort <https://github.com/SethMMorton/natsort>`__ package. + + >>> df = pd.DataFrame({ + ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], + ... "value": [10, 20, 30, 40, 50] + ... }) + >>> df + time value + 0 0hr 10 + 1 128hr 20 + 2 72hr 30 + 3 48hr 40 + 4 96hr 50 + >>> from natsort import index_natsorted + >>> df.sort_values( + ... by="time", + ... key=lambda x: np.argsort(index_natsorted(df["time"])) + ...
) + time value + 0 0hr 10 + 3 48hr 40 + 2 72hr 30 + 4 96hr 50 + 1 128hr 20 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + axis = self._get_axis_number(axis) + ascending = validate_ascending(ascending) + if not isinstance(by, list): + by = [by] + # error: Argument 1 to "len" has incompatible type "Union[bool, List[bool]]"; + # expected "Sized" + if is_sequence(ascending) and ( + len(by) != len(ascending) # type: ignore[arg-type] + ): + # error: Argument 1 to "len" has incompatible type "Union[bool, + # List[bool]]"; expected "Sized" + raise ValueError( + f"Length of ascending ({len(ascending)})" # type: ignore[arg-type] + f" != length of by ({len(by)})" + ) + if len(by) > 1: + keys = [self._get_label_or_level_values(x, axis=axis) for x in by] + + # need to rewrap columns in Series to apply key function + if key is not None: + # error: List comprehension has incompatible type List[Series]; + # expected List[ndarray] + keys = [ + Series(k, name=name) # type: ignore[misc] + for (k, name) in zip(keys, by) + ] + + indexer = lexsort_indexer( + keys, orders=ascending, na_position=na_position, key=key + ) + elif len(by): + # len(by) == 1 + + k = self._get_label_or_level_values(by[0], axis=axis) + + # need to rewrap column in Series to apply key function + if key is not None: + # error: Incompatible types in assignment (expression has type + # "Series", variable has type "ndarray") + k = Series(k, name=by[0]) # type: ignore[assignment] + + if isinstance(ascending, (tuple, list)): + ascending = ascending[0] + + indexer = nargsort( + k, kind=kind, ascending=ascending, na_position=na_position, key=key + ) + else: + if inplace: + return self._update_inplace(self) + else: + return self.copy(deep=None) + + if is_range_indexer(indexer, len(indexer)): + result = self.copy(deep=(not inplace and not using_copy_on_write())) + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + return self._update_inplace(result) + else: + return result + + new_data = self._mgr.take( + indexer, axis=self._get_block_manager_axis(axis), verify=False + ) + + if ignore_index: + new_data.set_axis( + self._get_block_manager_axis(axis), default_index(len(indexer)) + ) + + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="sort_values") + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> DataFrame: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: bool = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> DataFrame | None: + ... 
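+    # The ``@overload`` stubs above exist only for static type checkers:
+    # they encode that ``inplace=True`` makes ``sort_index`` return ``None``
+    # while the default returns a new ``DataFrame``. An illustrative check
+    # (not part of the upstream source)::
+    #
+    #     reveal_type(df.sort_index())              # -> DataFrame
+    #     reveal_type(df.sort_index(inplace=True))  # -> None
+    #
+    # Only the implementation below runs at runtime.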
+ + def sort_index( + self, + *, + axis: Axis = 0, + level: IndexLabel | None = None, + ascending: bool | Sequence[bool] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + sort_remaining: bool = True, + ignore_index: bool = False, + key: IndexKeyFunc | None = None, + ) -> DataFrame | None: + """ + Sort object by labels (along an axis). + + Returns a new DataFrame sorted by label if `inplace` argument is + ``False``, otherwise updates the original DataFrame and returns None. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis along which to sort. The value 0 identifies the rows, + and 1 identifies the columns. + level : int or level name or list of ints or list of level names + If not None, sort on values in specified index level(s). + ascending : bool or list-like of bools, default True + Sort ascending vs. descending. When the index is a MultiIndex the + sort direction can be controlled for each level individually. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. `mergesort` and `stable` are the only stable algorithms. For + DataFrames, this option is only applied when sorting on a single + column or label. + na_position : {'first', 'last'}, default 'last' + Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. + Not implemented for MultiIndex. + sort_remaining : bool, default True + If True and sorting by level and index is multilevel, sort by other + levels too (in order) after sorting by specified level. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + If not None, apply the key function to the index values + before sorting. This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect an + ``Index`` and return an ``Index`` of the same shape. For MultiIndex + inputs, the key is applied *per level*. + + Returns + ------- + DataFrame or None + The original DataFrame sorted by the labels or None if ``inplace=True``. + + See Also + -------- + Series.sort_index : Sort Series by the index. + DataFrame.sort_values : Sort DataFrame by the value. + Series.sort_values : Sort Series by the value. + + Examples + -------- + >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150], + ... columns=['A']) + >>> df.sort_index() + A + 1 4 + 29 2 + 100 1 + 150 5 + 234 3 + + By default, it sorts in ascending order, to sort in descending order, + use ``ascending=False`` + + >>> df.sort_index(ascending=False) + A + 234 3 + 150 5 + 100 1 + 29 2 + 1 4 + + A key function can be specified which is applied to the index before + sorting. For a ``MultiIndex`` this is applied to each level separately. 
+ + >>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd']) + >>> df.sort_index(key=lambda x: x.str.lower()) + a + A 1 + b 2 + C 3 + d 4 + """ + return super().sort_index( + axis=axis, + level=level, + ascending=ascending, + inplace=inplace, + kind=kind, + na_position=na_position, + sort_remaining=sort_remaining, + ignore_index=ignore_index, + key=key, + ) + + def value_counts( + self, + subset: IndexLabel | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> Series: + """ + Return a Series containing the frequency of each distinct row in the Dataframe. + + Parameters + ---------- + subset : label or list of labels, optional + Columns to use when counting unique combinations. + normalize : bool, default False + Return proportions rather than frequencies. + sort : bool, default True + Sort by frequencies when True. Sort by DataFrame column values when False. + ascending : bool, default False + Sort in ascending order. + dropna : bool, default True + Don't include counts of rows that contain NA values. + + .. versionadded:: 1.3.0 + + Returns + ------- + Series + + See Also + -------- + Series.value_counts: Equivalent method on Series. + + Notes + ----- + The returned Series will have a MultiIndex with one level per input + column but an Index (non-multi) for a single label. By default, rows + that contain any NA values are omitted from the result. By default, + the resulting Series will be in descending order so that the first + element is the most frequently-occurring row. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6], + ... 'num_wings': [2, 0, 0, 0]}, + ... index=['falcon', 'dog', 'cat', 'ant']) + >>> df + num_legs num_wings + falcon 2 2 + dog 4 0 + cat 4 0 + ant 6 0 + + >>> df.value_counts() + num_legs num_wings + 4 0 2 + 2 2 1 + 6 0 1 + Name: count, dtype: int64 + + >>> df.value_counts(sort=False) + num_legs num_wings + 2 2 1 + 4 0 2 + 6 0 1 + Name: count, dtype: int64 + + >>> df.value_counts(ascending=True) + num_legs num_wings + 2 2 1 + 6 0 1 + 4 0 2 + Name: count, dtype: int64 + + >>> df.value_counts(normalize=True) + num_legs num_wings + 4 0 0.50 + 2 2 0.25 + 6 0 0.25 + Name: proportion, dtype: float64 + + With `dropna` set to `False` we can also count rows with NA values. + + >>> df = pd.DataFrame({'first_name': ['John', 'Anne', 'John', 'Beth'], + ... 
'middle_name': ['Smith', pd.NA, pd.NA, 'Louise']}) + >>> df + first_name middle_name + 0 John Smith + 1 Anne <NA> + 2 John <NA> + 3 Beth Louise + + >>> df.value_counts() + first_name middle_name + Beth Louise 1 + John Smith 1 + Name: count, dtype: int64 + + >>> df.value_counts(dropna=False) + first_name middle_name + Anne NaN 1 + Beth Louise 1 + John Smith 1 + NaN 1 + Name: count, dtype: int64 + + >>> df.value_counts("first_name") + first_name + John 2 + Anne 1 + Beth 1 + Name: count, dtype: int64 + """ + if subset is None: + subset = self.columns.tolist() + + name = "proportion" if normalize else "count" + counts = self.groupby(subset, dropna=dropna, observed=False)._grouper.size() + counts.name = name + + if sort: + counts = counts.sort_values(ascending=ascending) + if normalize: + counts /= counts.sum() + + # Force MultiIndex for a list_like subset with a single column + if is_list_like(subset) and len(subset) == 1: # type: ignore[arg-type] + counts.index = MultiIndex.from_arrays( + [counts.index], names=[counts.index.name] + ) + + return counts + + def nlargest( + self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep = "first" + ) -> DataFrame: + """ + Return the first `n` rows ordered by `columns` in descending order. + + Return the first `n` rows with the largest values in `columns`, in + descending order. The columns that are not specified are returned as + well, but not used for ordering. + + This method is equivalent to + ``df.sort_values(columns, ascending=False).head(n)``, but more + performant. + + Parameters + ---------- + n : int + Number of rows to return. + columns : label or list of labels + Column label(s) to order by. + keep : {'first', 'last', 'all'}, default 'first' + Where there are duplicate values: + + - ``first`` : prioritize the first occurrence(s) + - ``last`` : prioritize the last occurrence(s) + - ``all`` : keep all the ties of the smallest item even if it means + selecting more than ``n`` items. + + Returns + ------- + DataFrame + The first `n` rows ordered by the given columns in descending + order. + + See Also + -------- + DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in + ascending order. + DataFrame.sort_values : Sort DataFrame by the values. + DataFrame.head : Return the first `n` rows without re-ordering. + + Notes + ----- + This function cannot be used with all column types. For example, when + specifying columns with `object` or `category` dtypes, ``TypeError`` is + raised. + + Examples + -------- + >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, + ... 434000, 434000, 337000, 11300, + ... 11300, 11300], + ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, + ... 17036, 182, 38, 311], + ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", + ... "IS", "NR", "TV", "AI"]}, + ... index=["Italy", "France", "Malta", + ... "Maldives", "Brunei", "Iceland", + ... "Nauru", "Tuvalu", "Anguilla"]) + >>> df + population GDP alpha-2 + Italy 59000000 1937894 IT + France 65000000 2583560 FR + Malta 434000 12011 MT + Maldives 434000 4520 MV + Brunei 434000 12128 BN + Iceland 337000 17036 IS + Nauru 11300 182 NR + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + + In the following example, we will use ``nlargest`` to select the three + rows having the largest values in column "population".
+ + >>> df.nlargest(3, 'population') + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Malta 434000 12011 MT + + When using ``keep='last'``, ties are resolved in reverse order: + + >>> df.nlargest(3, 'population', keep='last') + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Brunei 434000 12128 BN + + When using ``keep='all'``, the number of element kept can go beyond ``n`` + if there are duplicate values for the smallest element, all the + ties are kept: + + >>> df.nlargest(3, 'population', keep='all') + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Malta 434000 12011 MT + Maldives 434000 4520 MV + Brunei 434000 12128 BN + + However, ``nlargest`` does not keep ``n`` distinct largest elements: + + >>> df.nlargest(5, 'population', keep='all') + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Malta 434000 12011 MT + Maldives 434000 4520 MV + Brunei 434000 12128 BN + + To order by the largest values in column "population" and then "GDP", + we can specify multiple columns like in the next example. + + >>> df.nlargest(3, ['population', 'GDP']) + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Brunei 434000 12128 BN + """ + return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() + + def nsmallest( + self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep = "first" + ) -> DataFrame: + """ + Return the first `n` rows ordered by `columns` in ascending order. + + Return the first `n` rows with the smallest values in `columns`, in + ascending order. The columns that are not specified are returned as + well, but not used for ordering. + + This method is equivalent to + ``df.sort_values(columns, ascending=True).head(n)``, but more + performant. + + Parameters + ---------- + n : int + Number of items to retrieve. + columns : list or str + Column name or names to order by. + keep : {'first', 'last', 'all'}, default 'first' + Where there are duplicate values: + + - ``first`` : take the first occurrence. + - ``last`` : take the last occurrence. + - ``all`` : keep all the ties of the largest item even if it means + selecting more than ``n`` items. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.nlargest : Return the first `n` rows ordered by `columns` in + descending order. + DataFrame.sort_values : Sort DataFrame by the values. + DataFrame.head : Return the first `n` rows without re-ordering. + + Examples + -------- + >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, + ... 434000, 434000, 337000, 337000, + ... 11300, 11300], + ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, + ... 17036, 182, 38, 311], + ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", + ... "IS", "NR", "TV", "AI"]}, + ... index=["Italy", "France", "Malta", + ... "Maldives", "Brunei", "Iceland", + ... "Nauru", "Tuvalu", "Anguilla"]) + >>> df + population GDP alpha-2 + Italy 59000000 1937894 IT + France 65000000 2583560 FR + Malta 434000 12011 MT + Maldives 434000 4520 MV + Brunei 434000 12128 BN + Iceland 337000 17036 IS + Nauru 337000 182 NR + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + + In the following example, we will use ``nsmallest`` to select the + three rows having the smallest values in column "population". 
+ + >>> df.nsmallest(3, 'population') + population GDP alpha-2 + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + Iceland 337000 17036 IS + + When using ``keep='last'``, ties are resolved in reverse order: + + >>> df.nsmallest(3, 'population', keep='last') + population GDP alpha-2 + Anguilla 11300 311 AI + Tuvalu 11300 38 TV + Nauru 337000 182 NR + + When using ``keep='all'``, the number of element kept can go beyond ``n`` + if there are duplicate values for the largest element, all the + ties are kept. + + >>> df.nsmallest(3, 'population', keep='all') + population GDP alpha-2 + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + Iceland 337000 17036 IS + Nauru 337000 182 NR + + However, ``nsmallest`` does not keep ``n`` distinct + smallest elements: + + >>> df.nsmallest(4, 'population', keep='all') + population GDP alpha-2 + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + Iceland 337000 17036 IS + Nauru 337000 182 NR + + To order by the smallest values in column "population" and then "GDP", we can + specify multiple columns like in the next example. + + >>> df.nsmallest(3, ['population', 'GDP']) + population GDP alpha-2 + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + Nauru 337000 182 NR + """ + return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest() + + @doc( + Series.swaplevel, + klass=_shared_doc_kwargs["klass"], + extra_params=dedent( + """axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to swap levels on. 0 or 'index' for row-wise, 1 or + 'columns' for column-wise.""" + ), + examples=dedent( + """\ + Examples + -------- + >>> df = pd.DataFrame( + ... {"Grade": ["A", "B", "A", "C"]}, + ... index=[ + ... ["Final exam", "Final exam", "Coursework", "Coursework"], + ... ["History", "Geography", "History", "Geography"], + ... ["January", "February", "March", "April"], + ... ], + ... ) + >>> df + Grade + Final exam History January A + Geography February B + Coursework History March A + Geography April C + + In the following example, we will swap the levels of the indices. + Here, we will swap the levels column-wise, but levels can be swapped row-wise + in a similar manner. Note that column-wise is the default behaviour. + By not supplying any arguments for i and j, we swap the last and second to + last indices. + + >>> df.swaplevel() + Grade + Final exam January History A + February Geography B + Coursework March History A + April Geography C + + By supplying one argument, we can choose which index to swap the last + index with. We can for example swap the first index with the last one as + follows. + + >>> df.swaplevel(0) + Grade + January History Final exam A + February Geography Final exam B + March History Coursework A + April Geography Coursework C + + We can also define explicitly which indices we want to swap by supplying values + for both i and j. Here, we for example swap the first and second indices. 
+ + >>> df.swaplevel(0, 1) + Grade + History Final exam January A + Geography Final exam February B + History Coursework March A + Geography Coursework April C""" + ), + ) + def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame: + result = self.copy(deep=None) + + axis = self._get_axis_number(axis) + + if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover + raise TypeError("Can only swap levels on a hierarchical axis.") + + if axis == 0: + assert isinstance(result.index, MultiIndex) + result.index = result.index.swaplevel(i, j) + else: + assert isinstance(result.columns, MultiIndex) + result.columns = result.columns.swaplevel(i, j) + return result + + def reorder_levels(self, order: Sequence[int | str], axis: Axis = 0) -> DataFrame: + """ + Rearrange index levels using input order. May not drop or duplicate levels. + + Parameters + ---------- + order : list of int or list of str + List representing new level order. Reference level by number + (position) or by key (label). + axis : {0 or 'index', 1 or 'columns'}, default 0 + Where to reorder levels. + + Returns + ------- + DataFrame + + Examples + -------- + >>> data = { + ... "class": ["Mammals", "Mammals", "Reptiles"], + ... "diet": ["Omnivore", "Carnivore", "Carnivore"], + ... "species": ["Humans", "Dogs", "Snakes"], + ... } + >>> df = pd.DataFrame(data, columns=["class", "diet", "species"]) + >>> df = df.set_index(["class", "diet"]) + >>> df + species + class diet + Mammals Omnivore Humans + Carnivore Dogs + Reptiles Carnivore Snakes + + Let's reorder the levels of the index: + + >>> df.reorder_levels(["diet", "class"]) + species + diet class + Omnivore Mammals Humans + Carnivore Mammals Dogs + Reptiles Snakes + """ + axis = self._get_axis_number(axis) + if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover + raise TypeError("Can only reorder levels on a hierarchical axis.") + + result = self.copy(deep=None) + + if axis == 0: + assert isinstance(result.index, MultiIndex) + result.index = result.index.reorder_levels(order) + else: + assert isinstance(result.columns, MultiIndex) + result.columns = result.columns.reorder_levels(order) + return result + + # ---------------------------------------------------------------------- + # Arithmetic Methods + + def _cmp_method(self, other, op): + axis: Literal[1] = 1 # only relevant for Series other case + + self, other = self._align_for_op(other, axis, flex=False, level=None) + + # See GH#4537 for discussion of scalar op behavior + new_data = self._dispatch_frame_op(other, op, axis=axis) + return self._construct_result(new_data) + + def _arith_method(self, other, op): + if self._should_reindex_frame_op(other, op, 1, None, None): + return self._arith_method_with_reindex(other, op) + + axis: Literal[1] = 1 # only relevant for Series other case + other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],)) + + self, other = self._align_for_op(other, axis, flex=True, level=None) + + with np.errstate(all="ignore"): + new_data = self._dispatch_frame_op(other, op, axis=axis) + return self._construct_result(new_data) + + _logical_method = _arith_method + + def _dispatch_frame_op( + self, right, func: Callable, axis: AxisInt | None = None + ) -> DataFrame: + """ + Evaluate the frame operation func(left, right) by evaluating + column-by-column, dispatching to the Series implementation. 
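+
+        A rough sketch of the dispatch, as implemented below: a scalar
+        ``right`` is applied block-wise via ``self._mgr.apply``; an
+        already-aligned DataFrame goes through ``operate_blockwise``; a
+        Series with ``axis=1`` is unpacked so the array op runs
+        column-by-column against ``self._iter_column_arrays()``.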
+ + Parameters + ---------- + right : scalar, Series, or DataFrame + func : arithmetic or comparison operator + axis : {None, 0, 1} + + Returns + ------- + DataFrame + + Notes + ----- + Caller is responsible for setting np.errstate where relevant. + """ + # Get the appropriate array-op to apply to each column/block's values. + array_op = ops.get_array_op(func) + + right = lib.item_from_zerodim(right) + if not is_list_like(right): + # i.e. scalar, faster than checking np.ndim(right) == 0 + bm = self._mgr.apply(array_op, right=right) + return self._constructor_from_mgr(bm, axes=bm.axes) + + elif isinstance(right, DataFrame): + assert self.index.equals(right.index) + assert self.columns.equals(right.columns) + # TODO: The previous assertion `assert right._indexed_same(self)` + # fails in cases with empty columns reached via + # _frame_arith_method_with_reindex + + # TODO operate_blockwise expects a manager of the same type + bm = self._mgr.operate_blockwise( + # error: Argument 1 to "operate_blockwise" of "ArrayManager" has + # incompatible type "Union[ArrayManager, BlockManager]"; expected + # "ArrayManager" + # error: Argument 1 to "operate_blockwise" of "BlockManager" has + # incompatible type "Union[ArrayManager, BlockManager]"; expected + # "BlockManager" + right._mgr, # type: ignore[arg-type] + array_op, + ) + return self._constructor_from_mgr(bm, axes=bm.axes) + + elif isinstance(right, Series) and axis == 1: + # axis=1 means we want to operate row-by-row + assert right.index.equals(self.columns) + + right = right._values + # maybe_align_as_frame ensures we do not have an ndarray here + assert not isinstance(right, np.ndarray) + + arrays = [ + array_op(_left, _right) + for _left, _right in zip(self._iter_column_arrays(), right) + ] + + elif isinstance(right, Series): + assert right.index.equals(self.index) + right = right._values + + arrays = [array_op(left, right) for left in self._iter_column_arrays()] + + else: + raise NotImplementedError(right) + + return type(self)._from_arrays( + arrays, self.columns, self.index, verify_integrity=False + ) + + def _combine_frame(self, other: DataFrame, func, fill_value=None): + # at this point we have `self._indexed_same(other)` + + if fill_value is None: + # since _arith_op may be called in a loop, avoid function call + # overhead if possible by doing this check once + _arith_op = func + + else: + + def _arith_op(left, right): + # for the mixed_type case where we iterate over columns, + # _arith_op(left, right) is equivalent to + # left._binop(right, func, fill_value=fill_value) + left, right = ops.fill_binop(left, right, fill_value) + return func(left, right) + + new_data = self._dispatch_frame_op(other, _arith_op) + return new_data + + def _arith_method_with_reindex(self, right: DataFrame, op) -> DataFrame: + """ + For DataFrame-with-DataFrame operations that require reindexing, + operate only on shared columns, then reindex. 
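+
+        For example, adding frames with columns ``['a', 'b']`` and
+        ``['b', 'c']`` evaluates the op only on the shared ``'b'`` column
+        and then reindexes the result to the outer join ``['a', 'b', 'c']``,
+        leaving NaN in the columns that appear on one side only.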
+ + Parameters + ---------- + right : DataFrame + op : binary operator + + Returns + ------- + DataFrame + """ + left = self + + # GH#31623, only operate on shared columns + cols, lcols, rcols = left.columns.join( + right.columns, how="inner", level=None, return_indexers=True + ) + + new_left = left.iloc[:, lcols] + new_right = right.iloc[:, rcols] + result = op(new_left, new_right) + + # Do the join on the columns instead of using left._align_for_op + # to avoid constructing two potentially large/sparse DataFrames + join_columns, _, _ = left.columns.join( + right.columns, how="outer", level=None, return_indexers=True + ) + + if result.columns.has_duplicates: + # Avoid reindexing with a duplicate axis. + # https://github.com/pandas-dev/pandas/issues/35194 + indexer, _ = result.columns.get_indexer_non_unique(join_columns) + indexer = algorithms.unique1d(indexer) + result = result._reindex_with_indexers( + {1: [join_columns, indexer]}, allow_dups=True + ) + else: + result = result.reindex(join_columns, axis=1) + + return result + + def _should_reindex_frame_op(self, right, op, axis: int, fill_value, level) -> bool: + """ + Check if this is an operation between DataFrames that will need to reindex. + """ + if op is operator.pow or op is roperator.rpow: + # GH#32685 pow has special semantics for operating with null values + return False + + if not isinstance(right, DataFrame): + return False + + if fill_value is None and level is None and axis == 1: + # TODO: any other cases we should handle here? + + # Intersection is always unique so we have to check the unique columns + left_uniques = self.columns.unique() + right_uniques = right.columns.unique() + cols = left_uniques.intersection(right_uniques) + if len(cols) and not ( + len(cols) == len(left_uniques) and len(cols) == len(right_uniques) + ): + # TODO: is there a shortcut available when len(cols) == 0? + return True + + return False + + def _align_for_op( + self, + other, + axis: AxisInt, + flex: bool | None = False, + level: Level | None = None, + ): + """ + Convert rhs to meet lhs dims if input is list, tuple or np.ndarray. + + Parameters + ---------- + left : DataFrame + right : Any + axis : int + flex : bool or None, default False + Whether this is a flex op, in which case we reindex. + None indicates not to check for alignment. + level : int or level name, default None + + Returns + ------- + left : DataFrame + right : Any + """ + left, right = self, other + + def to_series(right): + msg = ( + "Unable to coerce to Series, " + "length must be {req_len}: given {given_len}" + ) + + # pass dtype to avoid doing inference, which would break consistency + # with Index/Series ops + dtype = None + if getattr(right, "dtype", None) == object: + # can't pass right.dtype unconditionally as that would break on e.g. 
+ # datetime64[h] ndarray + dtype = object + + if axis == 0: + if len(left.index) != len(right): + raise ValueError( + msg.format(req_len=len(left.index), given_len=len(right)) + ) + right = left._constructor_sliced(right, index=left.index, dtype=dtype) + else: + if len(left.columns) != len(right): + raise ValueError( + msg.format(req_len=len(left.columns), given_len=len(right)) + ) + right = left._constructor_sliced(right, index=left.columns, dtype=dtype) + return right + + if isinstance(right, np.ndarray): + if right.ndim == 1: + right = to_series(right) + + elif right.ndim == 2: + # We need to pass dtype=right.dtype to retain object dtype + # otherwise we lose consistency with Index and array ops + dtype = None + if right.dtype == object: + # can't pass right.dtype unconditionally as that would break on e.g. + # datetime64[h] ndarray + dtype = object + + if right.shape == left.shape: + right = left._constructor( + right, index=left.index, columns=left.columns, dtype=dtype + ) + + elif right.shape[0] == left.shape[0] and right.shape[1] == 1: + # Broadcast across columns + right = np.broadcast_to(right, left.shape) + right = left._constructor( + right, index=left.index, columns=left.columns, dtype=dtype + ) + + elif right.shape[1] == left.shape[1] and right.shape[0] == 1: + # Broadcast along rows + right = to_series(right[0, :]) + + else: + raise ValueError( + "Unable to coerce to DataFrame, shape " + f"must be {left.shape}: given {right.shape}" + ) + + elif right.ndim > 2: + raise ValueError( + "Unable to coerce to Series/DataFrame, " + f"dimension must be <= 2: {right.shape}" + ) + + elif is_list_like(right) and not isinstance(right, (Series, DataFrame)): + # GH#36702. Raise when attempting arithmetic with list of array-like. + if any(is_array_like(el) for el in right): + raise ValueError( + f"Unable to coerce list of {type(right[0])} to Series/DataFrame" + ) + # GH#17901 + right = to_series(right) + + if flex is not None and isinstance(right, DataFrame): + if not left._indexed_same(right): + if flex: + left, right = left.align( + right, join="outer", level=level, copy=False + ) + else: + raise ValueError( + "Can only compare identically-labeled (both index and columns) " + "DataFrame objects" + ) + elif isinstance(right, Series): + # axis=1 is default for DataFrame-with-Series op + axis = axis if axis is not None else 1 + if not flex: + if not left.axes[axis].equals(right.index): + raise ValueError( + "Operands are not aligned. Do " + "`left, right = left.align(right, axis=1, copy=False)` " + "before operating." + ) + + left, right = left.align( + right, + join="outer", + axis=axis, + level=level, + copy=False, + ) + right = left._maybe_align_series_as_frame(right, axis) + + return left, right + + def _maybe_align_series_as_frame(self, series: Series, axis: AxisInt): + """ + If the Series operand is not EA-dtype, we can broadcast to 2D and operate + blockwise. 
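+
+        For example, a float64 Series aligned on ``axis=0`` is reshaped to
+        ``(n, 1)`` and broadcast to ``self.shape``; extension-array dtypes
+        other than ``datetime64[ns]``/``timedelta64[ns]`` are returned
+        unchanged, as the code below shows.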
+ """ + rvalues = series._values + if not isinstance(rvalues, np.ndarray): + # TODO(EA2D): no need to special-case with 2D EAs + if rvalues.dtype in ("datetime64[ns]", "timedelta64[ns]"): + # We can losslessly+cheaply cast to ndarray + rvalues = np.asarray(rvalues) + else: + return series + + if axis == 0: + rvalues = rvalues.reshape(-1, 1) + else: + rvalues = rvalues.reshape(1, -1) + + rvalues = np.broadcast_to(rvalues, self.shape) + # pass dtype to avoid doing inference + return self._constructor( + rvalues, + index=self.index, + columns=self.columns, + dtype=rvalues.dtype, + ) + + def _flex_arith_method( + self, other, op, *, axis: Axis = "columns", level=None, fill_value=None + ): + axis = self._get_axis_number(axis) if axis is not None else 1 + + if self._should_reindex_frame_op(other, op, axis, fill_value, level): + return self._arith_method_with_reindex(other, op) + + if isinstance(other, Series) and fill_value is not None: + # TODO: We could allow this in cases where we end up going + # through the DataFrame path + raise NotImplementedError(f"fill_value {fill_value} not supported.") + + other = ops.maybe_prepare_scalar_for_op(other, self.shape) + self, other = self._align_for_op(other, axis, flex=True, level=level) + + with np.errstate(all="ignore"): + if isinstance(other, DataFrame): + # Another DataFrame + new_data = self._combine_frame(other, op, fill_value) + + elif isinstance(other, Series): + new_data = self._dispatch_frame_op(other, op, axis=axis) + else: + # in this case we always have `np.ndim(other) == 0` + if fill_value is not None: + self = self.fillna(fill_value) + + new_data = self._dispatch_frame_op(other, op) + + return self._construct_result(new_data) + + def _construct_result(self, result) -> DataFrame: + """ + Wrap the result of an arithmetic, comparison, or logical operation. 
+ + Parameters + ---------- + result : DataFrame + + Returns + ------- + DataFrame + """ + out = self._constructor(result, copy=False).__finalize__(self) + # Pin columns instead of passing to constructor for compat with + # non-unique columns case + out.columns = self.columns + out.index = self.index + return out + + def __divmod__(self, other) -> tuple[DataFrame, DataFrame]: + # Naive implementation, room for optimization + div = self // other + mod = self - div * other + return div, mod + + def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]: + # Naive implementation, room for optimization + div = other // self + mod = other - div * self + return div, mod + + def _flex_cmp_method(self, other, op, *, axis: Axis = "columns", level=None): + axis = self._get_axis_number(axis) if axis is not None else 1 + + self, other = self._align_for_op(other, axis, flex=True, level=level) + + new_data = self._dispatch_frame_op(other, op, axis=axis) + return self._construct_result(new_data) + + @Appender(ops.make_flex_doc("eq", "dataframe")) + def eq(self, other, axis: Axis = "columns", level=None) -> DataFrame: + return self._flex_cmp_method(other, operator.eq, axis=axis, level=level) + + @Appender(ops.make_flex_doc("ne", "dataframe")) + def ne(self, other, axis: Axis = "columns", level=None) -> DataFrame: + return self._flex_cmp_method(other, operator.ne, axis=axis, level=level) + + @Appender(ops.make_flex_doc("le", "dataframe")) + def le(self, other, axis: Axis = "columns", level=None) -> DataFrame: + return self._flex_cmp_method(other, operator.le, axis=axis, level=level) + + @Appender(ops.make_flex_doc("lt", "dataframe")) + def lt(self, other, axis: Axis = "columns", level=None) -> DataFrame: + return self._flex_cmp_method(other, operator.lt, axis=axis, level=level) + + @Appender(ops.make_flex_doc("ge", "dataframe")) + def ge(self, other, axis: Axis = "columns", level=None) -> DataFrame: + return self._flex_cmp_method(other, operator.ge, axis=axis, level=level) + + @Appender(ops.make_flex_doc("gt", "dataframe")) + def gt(self, other, axis: Axis = "columns", level=None) -> DataFrame: + return self._flex_cmp_method(other, operator.gt, axis=axis, level=level) + + @Appender(ops.make_flex_doc("add", "dataframe")) + def add( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, operator.add, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("radd", "dataframe")) + def radd( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, roperator.radd, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("sub", "dataframe")) + def sub( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, operator.sub, level=level, fill_value=fill_value, axis=axis + ) + + subtract = sub + + @Appender(ops.make_flex_doc("rsub", "dataframe")) + def rsub( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, roperator.rsub, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mul", "dataframe")) + def mul( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, operator.mul, level=level, fill_value=fill_value, axis=axis + ) + + multiply = mul + + 
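+    # A minimal usage sketch (illustrative, not part of the upstream
+    # source): unlike the bare ``+`` operator, the flex methods in this
+    # block accept ``axis``, ``level`` and ``fill_value``::
+    #
+    #     df1 = pd.DataFrame({"a": [1.0, np.nan]})
+    #     df2 = pd.DataFrame({"a": [10.0, 20.0]})
+    #     df1 + df2                   # a -> [11.0, NaN]
+    #     df1.add(df2, fill_value=0)  # a -> [11.0, 20.0]
+    #
+    # A value missing on one side only is replaced by ``fill_value`` before
+    # the op; positions missing on both sides remain NaN.
+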
@Appender(ops.make_flex_doc("rmul", "dataframe")) + def rmul( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, roperator.rmul, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("truediv", "dataframe")) + def truediv( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, operator.truediv, level=level, fill_value=fill_value, axis=axis + ) + + div = truediv + divide = truediv + + @Appender(ops.make_flex_doc("rtruediv", "dataframe")) + def rtruediv( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, roperator.rtruediv, level=level, fill_value=fill_value, axis=axis + ) + + rdiv = rtruediv + + @Appender(ops.make_flex_doc("floordiv", "dataframe")) + def floordiv( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, operator.floordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rfloordiv", "dataframe")) + def rfloordiv( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, roperator.rfloordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mod", "dataframe")) + def mod( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, operator.mod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rmod", "dataframe")) + def rmod( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, roperator.rmod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("pow", "dataframe")) + def pow( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, operator.pow, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rpow", "dataframe")) + def rpow( + self, other, axis: Axis = "columns", level=None, fill_value=None + ) -> DataFrame: + return self._flex_arith_method( + other, roperator.rpow, level=level, fill_value=fill_value, axis=axis + ) + + # ---------------------------------------------------------------------- + # Combination-Related + + @doc( + _shared_docs["compare"], + dedent( + """ + Returns + ------- + DataFrame + DataFrame that shows the differences stacked side by side. + + The resulting index will be a MultiIndex with 'self' and 'other' + stacked alternately at the inner level. + + Raises + ------ + ValueError + When the two DataFrames don't have identical labels or shape. + + See Also + -------- + Series.compare : Compare with another Series and show differences. + DataFrame.equals : Test whether two objects contain the same elements. + + Notes + ----- + Matching NaNs will not appear as a difference. + + Can only compare identically-labeled + (i.e. same shape, identical row and column labels) DataFrames + + Examples + -------- + >>> df = pd.DataFrame( + ... {{ + ... "col1": ["a", "a", "b", "b", "a"], + ... "col2": [1.0, 2.0, 3.0, np.nan, 5.0], + ... "col3": [1.0, 2.0, 3.0, 4.0, 5.0] + ... }}, + ... columns=["col1", "col2", "col3"], + ... 
) + >>> df + col1 col2 col3 + 0 a 1.0 1.0 + 1 a 2.0 2.0 + 2 b 3.0 3.0 + 3 b NaN 4.0 + 4 a 5.0 5.0 + + >>> df2 = df.copy() + >>> df2.loc[0, 'col1'] = 'c' + >>> df2.loc[2, 'col3'] = 4.0 + >>> df2 + col1 col2 col3 + 0 c 1.0 1.0 + 1 a 2.0 2.0 + 2 b 3.0 4.0 + 3 b NaN 4.0 + 4 a 5.0 5.0 + + Align the differences on columns + + >>> df.compare(df2) + col1 col3 + self other self other + 0 a c NaN NaN + 2 NaN NaN 3.0 4.0 + + Assign result_names + + >>> df.compare(df2, result_names=("left", "right")) + col1 col3 + left right left right + 0 a c NaN NaN + 2 NaN NaN 3.0 4.0 + + Stack the differences on rows + + >>> df.compare(df2, align_axis=0) + col1 col3 + 0 self a NaN + other c NaN + 2 self NaN 3.0 + other NaN 4.0 + + Keep the equal values + + >>> df.compare(df2, keep_equal=True) + col1 col3 + self other self other + 0 a c 1.0 1.0 + 2 b b 3.0 4.0 + + Keep all original rows and columns + + >>> df.compare(df2, keep_shape=True) + col1 col2 col3 + self other self other self other + 0 a c NaN NaN NaN NaN + 1 NaN NaN NaN NaN NaN NaN + 2 NaN NaN NaN NaN 3.0 4.0 + 3 NaN NaN NaN NaN NaN NaN + 4 NaN NaN NaN NaN NaN NaN + + Keep all original rows and columns and also all original values + + >>> df.compare(df2, keep_shape=True, keep_equal=True) + col1 col2 col3 + self other self other self other + 0 a c 1.0 1.0 1.0 1.0 + 1 a a 2.0 2.0 2.0 2.0 + 2 b b 3.0 3.0 3.0 4.0 + 3 b b NaN NaN 4.0 4.0 + 4 a a 5.0 5.0 5.0 5.0 + """ + ), + klass=_shared_doc_kwargs["klass"], + ) + def compare( + self, + other: DataFrame, + align_axis: Axis = 1, + keep_shape: bool = False, + keep_equal: bool = False, + result_names: Suffixes = ("self", "other"), + ) -> DataFrame: + return super().compare( + other=other, + align_axis=align_axis, + keep_shape=keep_shape, + keep_equal=keep_equal, + result_names=result_names, + ) + + def combine( + self, + other: DataFrame, + func: Callable[[Series, Series], Series | Hashable], + fill_value=None, + overwrite: bool = True, + ) -> DataFrame: + """ + Perform column-wise combine with another DataFrame. + + Combines a DataFrame with `other` DataFrame using `func` + to element-wise combine columns. The row and column indexes of the + resulting DataFrame will be the union of the two. + + Parameters + ---------- + other : DataFrame + The DataFrame to merge column-wise. + func : function + Function that takes two series as inputs and return a Series or a + scalar. Used to merge the two dataframes column by columns. + fill_value : scalar value, default None + The value to fill NaNs with prior to passing any column to the + merge func. + overwrite : bool, default True + If True, columns in `self` that do not exist in `other` will be + overwritten with NaNs. + + Returns + ------- + DataFrame + Combination of the provided DataFrames. + + See Also + -------- + DataFrame.combine_first : Combine two DataFrame objects and default to + non-null values in frame calling the method. + + Examples + -------- + Combine using a simple function that chooses the smaller column. + + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 + >>> df1.combine(df2, take_smaller) + A B + 0 0 3 + 1 0 3 + + Example using a true element-wise combine function. + + >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine(df2, np.minimum) + A B + 0 1 2 + 1 0 3 + + Using `fill_value` fills Nones prior to passing the column to the + merge function. 
+ + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine(df2, take_smaller, fill_value=-5) + A B + 0 0 -5.0 + 1 0 4.0 + + However, if the same element in both dataframes is None, that None + is preserved + + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) + >>> df1.combine(df2, take_smaller, fill_value=-5) + A B + 0 0 -5.0 + 1 0 3.0 + + Example that demonstrates the use of `overwrite` and behavior when + the axis differ between the dataframes. + + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) + >>> df1.combine(df2, take_smaller) + A B C + 0 NaN NaN NaN + 1 NaN 3.0 -10.0 + 2 NaN 3.0 1.0 + + >>> df1.combine(df2, take_smaller, overwrite=False) + A B C + 0 0.0 NaN NaN + 1 0.0 3.0 -10.0 + 2 NaN 3.0 1.0 + + Demonstrating the preference of the passed in dataframe. + + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) + >>> df2.combine(df1, take_smaller) + A B C + 0 0.0 NaN NaN + 1 0.0 3.0 NaN + 2 NaN 3.0 NaN + + >>> df2.combine(df1, take_smaller, overwrite=False) + A B C + 0 0.0 NaN NaN + 1 0.0 3.0 1.0 + 2 NaN 3.0 1.0 + """ + other_idxlen = len(other.index) # save for compare + + this, other = self.align(other, copy=False) + new_index = this.index + + if other.empty and len(new_index) == len(self.index): + return self.copy() + + if self.empty and len(other) == other_idxlen: + return other.copy() + + # sorts if possible; otherwise align above ensures that these are set-equal + new_columns = this.columns.union(other.columns) + do_fill = fill_value is not None + result = {} + for col in new_columns: + series = this[col] + other_series = other[col] + + this_dtype = series.dtype + other_dtype = other_series.dtype + + this_mask = isna(series) + other_mask = isna(other_series) + + # don't overwrite columns unnecessarily + # DO propagate if this column is not in the intersection + if not overwrite and other_mask.all(): + result[col] = this[col].copy() + continue + + if do_fill: + series = series.copy() + other_series = other_series.copy() + series[this_mask] = fill_value + other_series[other_mask] = fill_value + + if col not in self.columns: + # If self DataFrame does not have col in other DataFrame, + # try to promote series, which is all NaN, as other_dtype. + new_dtype = other_dtype + try: + series = series.astype(new_dtype, copy=False) + except ValueError: + # e.g. new_dtype is integer types + pass + else: + # if we have different dtypes, possibly promote + new_dtype = find_common_type([this_dtype, other_dtype]) + series = series.astype(new_dtype, copy=False) + other_series = other_series.astype(new_dtype, copy=False) + + arr = func(series, other_series) + if isinstance(new_dtype, np.dtype): + # if new_dtype is an EA Dtype, then `func` is expected to return + # the correct dtype without any additional casting + # error: No overload variant of "maybe_downcast_to_dtype" matches + # argument types "Union[Series, Hashable]", "dtype[Any]" + arr = maybe_downcast_to_dtype( # type: ignore[call-overload] + arr, new_dtype + ) + + result[col] = arr + + # convert_objects just in case + frame_result = self._constructor(result, index=new_index, columns=new_columns) + return frame_result.__finalize__(self, method="combine") + + def combine_first(self, other: DataFrame) -> DataFrame: + """ + Update null elements with value in the same location in `other`. 
+ + Combine two DataFrame objects by filling null values in one DataFrame + with non-null values from other DataFrame. The row and column indexes + of the resulting DataFrame will be the union of the two. The resulting + dataframe contains the 'first' dataframe values and overrides the + second one values where both first.loc[index, col] and + second.loc[index, col] are not missing values, upon calling + first.combine_first(second). + + Parameters + ---------- + other : DataFrame + Provided DataFrame to use to fill null values. + + Returns + ------- + DataFrame + The result of combining the provided DataFrame with the other object. + + See Also + -------- + DataFrame.combine : Perform series-wise operation on two DataFrames + using a given function. + + Examples + -------- + >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine_first(df2) + A B + 0 1.0 3.0 + 1 0.0 4.0 + + Null values still persist if the location of that null value + does not exist in `other` + + >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) + >>> df1.combine_first(df2) + A B C + 0 NaN 4.0 NaN + 1 0.0 3.0 1.0 + 2 NaN 3.0 1.0 + """ + from pandas.core.computation import expressions + + def combiner(x: Series, y: Series): + mask = x.isna()._values + + x_values = x._values + y_values = y._values + + # If the column y in other DataFrame is not in first DataFrame, + # just return y_values. + if y.name not in self.columns: + return y_values + + return expressions.where(mask, y_values, x_values) + + if len(other) == 0: + combined = self.reindex( + self.columns.append(other.columns.difference(self.columns)), axis=1 + ) + combined = combined.astype(other.dtypes) + else: + combined = self.combine(other, combiner, overwrite=False) + + dtypes = { + col: find_common_type([self.dtypes[col], other.dtypes[col]]) + for col in self.columns.intersection(other.columns) + if combined.dtypes[col] != self.dtypes[col] + } + + if dtypes: + combined = combined.astype(dtypes) + + return combined.__finalize__(self, method="combine_first") + + def update( + self, + other, + join: UpdateJoin = "left", + overwrite: bool = True, + filter_func=None, + errors: IgnoreRaise = "ignore", + ) -> None: + """ + Modify in place using non-NA values from another DataFrame. + + Aligns on indices. There is no return value. + + Parameters + ---------- + other : DataFrame, or object coercible into a DataFrame + Should have at least one matching index/column label + with the original DataFrame. If a Series is passed, + its name attribute must be set, and that will be + used as the column name to align with the original DataFrame. + join : {'left'}, default 'left' + Only left join is implemented, keeping the index and columns of the + original object. + overwrite : bool, default True + How to handle non-NA values for overlapping keys: + + * True: overwrite original DataFrame's values + with values from `other`. + * False: only update values that are NA in + the original DataFrame. + + filter_func : callable(1d-array) -> bool 1d-array, optional + Can choose to replace values other than NA. Return True for values + that should be updated. + errors : {'raise', 'ignore'}, default 'ignore' + If 'raise', will raise a ValueError if the DataFrame and `other` + both contain non-NA data in the same place. + + Returns + ------- + None + This method directly changes calling object. 
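+
+ Because the operation is in place, assigning the result is a
+ common mistake; a minimal sketch of our own:
+
+ >>> d = pd.DataFrame({'A': [1.0]})
+ >>> res = d.update(pd.DataFrame({'A': [2.0]}))
+ >>> res is None
+ True
+ >>> d
+      A
+ 0  2.0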
+ + Raises + ------ + ValueError + * When `errors='raise'` and there's overlapping non-NA data. + * When `errors` is not either `'ignore'` or `'raise'` + NotImplementedError + * If `join != 'left'` + + See Also + -------- + dict.update : Similar method for dictionaries. + DataFrame.merge : For column(s)-on-column(s) operations. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2, 3], + ... 'B': [400, 500, 600]}) + >>> new_df = pd.DataFrame({'B': [4, 5, 6], + ... 'C': [7, 8, 9]}) + >>> df.update(new_df) + >>> df + A B + 0 1 4 + 1 2 5 + 2 3 6 + + The DataFrame's length does not increase as a result of the update, + only values at matching index/column labels are updated. + + >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], + ... 'B': ['x', 'y', 'z']}) + >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) + >>> df.update(new_df) + >>> df + A B + 0 a d + 1 b e + 2 c f + + >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], + ... 'B': ['x', 'y', 'z']}) + >>> new_df = pd.DataFrame({'B': ['d', 'f']}, index=[0, 2]) + >>> df.update(new_df) + >>> df + A B + 0 a d + 1 b y + 2 c f + + For Series, its name attribute must be set. + + >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], + ... 'B': ['x', 'y', 'z']}) + >>> new_column = pd.Series(['d', 'e', 'f'], name='B') + >>> df.update(new_column) + >>> df + A B + 0 a d + 1 b e + 2 c f + + If `other` contains NaNs the corresponding values are not updated + in the original dataframe. + + >>> df = pd.DataFrame({'A': [1, 2, 3], + ... 'B': [400., 500., 600.]}) + >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) + >>> df.update(new_df) + >>> df + A B + 0 1 4.0 + 1 2 500.0 + 2 3 6.0 + """ + + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + elif not PYPY and not using_copy_on_write() and self._is_view_after_cow_rules(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_warning_method_msg, + FutureWarning, + stacklevel=2, + ) + + # TODO: Support other joins + if join != "left": # pragma: no cover + raise NotImplementedError("Only left join is supported") + if errors not in ["ignore", "raise"]: + raise ValueError("The parameter errors must be either 'ignore' or 'raise'") + + if not isinstance(other, DataFrame): + other = DataFrame(other) + + other = other.reindex(self.index) + + for col in self.columns.intersection(other.columns): + this = self[col]._values + that = other[col]._values + + if filter_func is not None: + mask = ~filter_func(this) | isna(that) + else: + if errors == "raise": + mask_this = notna(that) + mask_that = notna(this) + if any(mask_this & mask_that): + raise ValueError("Data overlaps.") + + if overwrite: + mask = isna(that) + else: + mask = notna(this) + + # don't overwrite columns unnecessarily + if mask.all(): + continue + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message="Downcasting behavior", + category=FutureWarning, + ) + # GH#57124 - `that` might get upcasted because of NA values, and then + # downcasted in where because of the mask. Ignoring the warning + # is a stopgap, will replace with a new implementation of update + # in 3.0. + self.loc[:, col] = self[col].where(mask, that) + + # ---------------------------------------------------------------------- + # Data reshaping + @Appender( + dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', + ... 'Parrot', 'Parrot'], + ... 
'Max Speed': [380., 370., 24., 26.]}) + >>> df + Animal Max Speed + 0 Falcon 380.0 + 1 Falcon 370.0 + 2 Parrot 24.0 + 3 Parrot 26.0 + >>> df.groupby(['Animal']).mean() + Max Speed + Animal + Falcon 375.0 + Parrot 25.0 + + **Hierarchical Indexes** + + We can groupby different levels of a hierarchical index + using the `level` parameter: + + >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], + ... ['Captive', 'Wild', 'Captive', 'Wild']] + >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) + >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, + ... index=index) + >>> df + Max Speed + Animal Type + Falcon Captive 390.0 + Wild 350.0 + Parrot Captive 30.0 + Wild 20.0 + >>> df.groupby(level=0).mean() + Max Speed + Animal + Falcon 370.0 + Parrot 25.0 + >>> df.groupby(level="Type").mean() + Max Speed + Type + Captive 210.0 + Wild 185.0 + + We can also choose to include NA in group keys or not by setting + `dropna` parameter, the default setting is `True`. + + >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]] + >>> df = pd.DataFrame(l, columns=["a", "b", "c"]) + + >>> df.groupby(by=["b"]).sum() + a c + b + 1.0 2 3 + 2.0 2 5 + + >>> df.groupby(by=["b"], dropna=False).sum() + a c + b + 1.0 2 3 + 2.0 2 5 + NaN 1 4 + + >>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]] + >>> df = pd.DataFrame(l, columns=["a", "b", "c"]) + + >>> df.groupby(by="a").sum() + b c + a + a 13.0 13.0 + b 12.3 123.0 + + >>> df.groupby(by="a", dropna=False).sum() + b c + a + a 13.0 13.0 + b 12.3 123.0 + NaN 12.3 33.0 + + When using ``.apply()``, use ``group_keys`` to include or exclude the + group keys. The ``group_keys`` argument defaults to ``True`` (include). + + >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', + ... 'Parrot', 'Parrot'], + ... 'Max Speed': [380., 370., 24., 26.]}) + >>> df.groupby("Animal", group_keys=True)[['Max Speed']].apply(lambda x: x) + Max Speed + Animal + Falcon 0 380.0 + 1 370.0 + Parrot 2 24.0 + 3 26.0 + + >>> df.groupby("Animal", group_keys=False)[['Max Speed']].apply(lambda x: x) + Max Speed + 0 380.0 + 1 370.0 + 2 24.0 + 3 26.0 + """ + ) + ) + @Appender(_shared_docs["groupby"] % _shared_doc_kwargs) + def groupby( + self, + by=None, + axis: Axis | lib.NoDefault = lib.no_default, + level: IndexLabel | None = None, + as_index: bool = True, + sort: bool = True, + group_keys: bool = True, + observed: bool | lib.NoDefault = lib.no_default, + dropna: bool = True, + ) -> DataFrameGroupBy: + if axis is not lib.no_default: + axis = self._get_axis_number(axis) + if axis == 1: + warnings.warn( + "DataFrame.groupby with axis=1 is deprecated. Do " + "`frame.T.groupby(...)` without axis instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + "The 'axis' keyword in DataFrame.groupby is deprecated and " + "will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + + from pandas.core.groupby.generic import DataFrameGroupBy + + if level is None and by is None: + raise TypeError("You have to supply one of 'by' and 'level'") + + return DataFrameGroupBy( + obj=self, + keys=by, + axis=axis, + level=level, + as_index=as_index, + sort=sort, + group_keys=group_keys, + observed=observed, + dropna=dropna, + ) + + _shared_docs[ + "pivot" + ] = """ + Return reshaped DataFrame organized by given index / column values. + + Reshape data (produce a "pivot" table) based on column values. 
Uses + unique values from specified `index` / `columns` to form axes of the + resulting DataFrame. This function does not support data + aggregation, multiple values will result in a MultiIndex in the + columns. See the :ref:`User Guide ` for more on reshaping. + + Parameters + ----------%s + columns : str or object or a list of str + Column to use to make new frame's columns. + index : str or object or a list of str, optional + Column to use to make new frame's index. If not given, uses existing index. + values : str, object or a list of the previous, optional + Column(s) to use for populating new frame's values. If not + specified, all remaining columns will be used and the result will + have hierarchically indexed columns. + + Returns + ------- + DataFrame + Returns reshaped DataFrame. + + Raises + ------ + ValueError: + When there are any `index`, `columns` combinations with multiple + values. `DataFrame.pivot_table` when you need to aggregate. + + See Also + -------- + DataFrame.pivot_table : Generalization of pivot that can handle + duplicate values for one index/column pair. + DataFrame.unstack : Pivot based on the index values instead of a + column. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. + + Notes + ----- + For finer-tuned control, see hierarchical indexing documentation along + with the related stack/unstack methods. + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', + ... 'two'], + ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], + ... 'baz': [1, 2, 3, 4, 5, 6], + ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) + >>> df + foo bar baz zoo + 0 one A 1 x + 1 one B 2 y + 2 one C 3 z + 3 two A 4 q + 4 two B 5 w + 5 two C 6 t + + >>> df.pivot(index='foo', columns='bar', values='baz') + bar A B C + foo + one 1 2 3 + two 4 5 6 + + >>> df.pivot(index='foo', columns='bar')['baz'] + bar A B C + foo + one 1 2 3 + two 4 5 6 + + >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) + baz zoo + bar A B C A B C + foo + one 1 2 3 x y z + two 4 5 6 q w t + + You could also assign a list of column names or a list of index names. + + >>> df = pd.DataFrame({ + ... "lev1": [1, 1, 1, 2, 2, 2], + ... "lev2": [1, 1, 2, 1, 1, 2], + ... "lev3": [1, 2, 1, 2, 1, 2], + ... "lev4": [1, 2, 3, 4, 5, 6], + ... "values": [0, 1, 2, 3, 4, 5]}) + >>> df + lev1 lev2 lev3 lev4 values + 0 1 1 1 1 0 + 1 1 1 2 2 1 + 2 1 2 1 3 2 + 3 2 1 2 4 3 + 4 2 1 1 5 4 + 5 2 2 2 6 5 + + >>> df.pivot(index="lev1", columns=["lev2", "lev3"], values="values") + lev2 1 2 + lev3 1 2 1 2 + lev1 + 1 0.0 1.0 2.0 NaN + 2 4.0 3.0 NaN 5.0 + + >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"], values="values") + lev3 1 2 + lev1 lev2 + 1 1 0.0 1.0 + 2 2.0 NaN + 2 1 4.0 3.0 + 2 NaN 5.0 + + A ValueError is raised if there are any duplicates. + + >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], + ... "bar": ['A', 'A', 'B', 'C'], + ... "baz": [1, 2, 3, 4]}) + >>> df + foo bar baz + 0 one A 1 + 1 one A 2 + 2 two B 3 + 3 two C 4 + + Notice that the first two rows are the same for our `index` + and `columns` arguments. + + >>> df.pivot(index='foo', columns='bar', values='baz') + Traceback (most recent call last): + ... 
+ ValueError: Index contains duplicate entries, cannot reshape + """ + + @Substitution("") + @Appender(_shared_docs["pivot"]) + def pivot( + self, *, columns, index=lib.no_default, values=lib.no_default + ) -> DataFrame: + from pandas.core.reshape.pivot import pivot + + return pivot(self, index=index, columns=columns, values=values) + + _shared_docs[ + "pivot_table" + ] = """ + Create a spreadsheet-style pivot table as a DataFrame. + + The levels in the pivot table will be stored in MultiIndex objects + (hierarchical indexes) on the index and columns of the result DataFrame. + + Parameters + ----------%s + values : list-like or scalar, optional + Column or columns to aggregate. + index : column, Grouper, array, or list of the previous + Keys to group by on the pivot table index. If a list is passed, + it can contain any of the other types (except list). If an array is + passed, it must be the same length as the data and will be used in + the same manner as column values. + columns : column, Grouper, array, or list of the previous + Keys to group by on the pivot table column. If a list is passed, + it can contain any of the other types (except list). If an array is + passed, it must be the same length as the data and will be used in + the same manner as column values. + aggfunc : function, list of functions, dict, default "mean" + If a list of functions is passed, the resulting pivot table will have + hierarchical columns whose top level are the function names + (inferred from the function objects themselves). + If a dict is passed, the key is column to aggregate and the value is + function or list of functions. If ``margin=True``, aggfunc will be + used to calculate the partial aggregates. + fill_value : scalar, default None + Value to replace missing values with (in the resulting pivot table, + after aggregation). + margins : bool, default False + If ``margins=True``, special ``All`` columns and rows + will be added with partial group aggregates across the categories + on the rows and columns. + dropna : bool, default True + Do not include columns whose entries are all NaN. If True, + rows with a NaN value in any column will be omitted before + computing margins. + margins_name : str, default 'All' + Name of the row / column that will contain the totals + when margins is True. + observed : bool, default False + This only applies if any of the groupers are Categoricals. + If True: only show observed values for categorical groupers. + If False: show all values for categorical groupers. + + .. deprecated:: 2.2.0 + + The default value of ``False`` is deprecated and will change to + ``True`` in a future version of pandas. + + sort : bool, default True + Specifies if the result should be sorted. + + .. versionadded:: 1.3.0 + + Returns + ------- + DataFrame + An Excel style pivot table. + + See Also + -------- + DataFrame.pivot : Pivot without aggregation that can handle + non-numeric data. + DataFrame.melt: Unpivot a DataFrame from wide to long format, + optionally leaving identifiers set. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. + + Notes + ----- + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", + ... "bar", "bar", "bar", "bar"], + ... "B": ["one", "one", "one", "two", "two", + ... "one", "one", "two", "two"], + ... "C": ["small", "large", "large", "small", + ... "small", "large", "small", "small", + ... "large"], + ... 
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) + >>> df + A B C D E + 0 foo one small 1 2 + 1 foo one large 2 4 + 2 foo one large 2 5 + 3 foo two small 3 5 + 4 foo two small 3 6 + 5 bar one large 4 6 + 6 bar one small 5 8 + 7 bar two small 6 9 + 8 bar two large 7 9 + + This first example aggregates values by taking the sum. + + >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], + ... columns=['C'], aggfunc="sum") + >>> table + C large small + A B + bar one 4.0 5.0 + two 7.0 6.0 + foo one 4.0 1.0 + two NaN 6.0 + + We can also fill missing values using the `fill_value` parameter. + + >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], + ... columns=['C'], aggfunc="sum", fill_value=0) + >>> table + C large small + A B + bar one 4 5 + two 7 6 + foo one 4 1 + two 0 6 + + The next example aggregates by taking the mean across multiple columns. + + >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], + ... aggfunc={'D': "mean", 'E': "mean"}) + >>> table + D E + A C + bar large 5.500000 7.500000 + small 5.500000 8.500000 + foo large 2.000000 4.500000 + small 2.333333 4.333333 + + We can also calculate multiple types of aggregations for any given + value column. + + >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], + ... aggfunc={'D': "mean", + ... 'E': ["min", "max", "mean"]}) + >>> table + D E + mean max mean min + A C + bar large 5.500000 9 7.500000 6 + small 5.500000 9 8.500000 8 + foo large 2.000000 5 4.500000 4 + small 2.333333 6 4.333333 2 + """ + + @Substitution("") + @Appender(_shared_docs["pivot_table"]) + def pivot_table( + self, + values=None, + index=None, + columns=None, + aggfunc: AggFuncType = "mean", + fill_value=None, + margins: bool = False, + dropna: bool = True, + margins_name: Level = "All", + observed: bool | lib.NoDefault = lib.no_default, + sort: bool = True, + ) -> DataFrame: + from pandas.core.reshape.pivot import pivot_table + + return pivot_table( + self, + values=values, + index=index, + columns=columns, + aggfunc=aggfunc, + fill_value=fill_value, + margins=margins, + dropna=dropna, + margins_name=margins_name, + observed=observed, + sort=sort, + ) + + def stack( + self, + level: IndexLabel = -1, + dropna: bool | lib.NoDefault = lib.no_default, + sort: bool | lib.NoDefault = lib.no_default, + future_stack: bool = False, + ): + """ + Stack the prescribed level(s) from columns to index. + + Return a reshaped DataFrame or Series having a multi-level + index with one or more new inner-most levels compared to the current + DataFrame. The new inner-most levels are created by pivoting the + columns of the current dataframe: + + - if the columns have a single level, the output is a Series; + - if the columns have multiple levels, the new index + level(s) is (are) taken from the prescribed level(s) and + the output is a DataFrame. + + Parameters + ---------- + level : int, str, list, default -1 + Level(s) to stack from the column axis onto the index + axis, defined as one index or label, or a list of indices + or labels. + dropna : bool, default True + Whether to drop rows in the resulting Frame/Series with + missing values. Stacking a column level onto the index + axis can create combinations of index and column values + that are missing from the original dataframe. See Examples + section. + sort : bool, default True + Whether to sort the levels of the resulting MultiIndex. 
+ future_stack : bool, default False + Whether to use the new implementation that will replace the current + implementation in pandas 3.0. When True, dropna and sort have no impact + on the result and must remain unspecified. See :ref:`pandas 2.1.0 Release + notes ` for more details. + + Returns + ------- + DataFrame or Series + Stacked dataframe or series. + + See Also + -------- + DataFrame.unstack : Unstack prescribed level(s) from index axis + onto column axis. + DataFrame.pivot : Reshape dataframe from long format to wide + format. + DataFrame.pivot_table : Create a spreadsheet-style pivot table + as a DataFrame. + + Notes + ----- + The function is named by analogy with a collection of books + being reorganized from being side by side on a horizontal + position (the columns of the dataframe) to being stacked + vertically on top of each other (in the index of the + dataframe). + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + **Single level columns** + + >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]], + ... index=['cat', 'dog'], + ... columns=['weight', 'height']) + + Stacking a dataframe with a single level column axis returns a Series: + + >>> df_single_level_cols + weight height + cat 0 1 + dog 2 3 + >>> df_single_level_cols.stack(future_stack=True) + cat weight 0 + height 1 + dog weight 2 + height 3 + dtype: int64 + + **Multi level columns: simple case** + + >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), + ... ('weight', 'pounds')]) + >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]], + ... index=['cat', 'dog'], + ... columns=multicol1) + + Stacking a dataframe with a multi-level column axis: + + >>> df_multi_level_cols1 + weight + kg pounds + cat 1 2 + dog 2 4 + >>> df_multi_level_cols1.stack(future_stack=True) + weight + cat kg 1 + pounds 2 + dog kg 2 + pounds 4 + + **Missing values** + + >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), + ... ('height', 'm')]) + >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], + ... index=['cat', 'dog'], + ... columns=multicol2) + + It is common to have missing values when stacking a dataframe + with multi-level columns, as the stacked dataframe typically + has more values than the original dataframe. Missing values + are filled with NaNs: + + >>> df_multi_level_cols2 + weight height + kg m + cat 1.0 2.0 + dog 3.0 4.0 + >>> df_multi_level_cols2.stack(future_stack=True) + weight height + cat kg 1.0 NaN + m NaN 2.0 + dog kg 3.0 NaN + m NaN 4.0 + + **Prescribing the level(s) to be stacked** + + The first parameter controls which level or levels are stacked: + + >>> df_multi_level_cols2.stack(0, future_stack=True) + kg m + cat weight 1.0 NaN + height NaN 2.0 + dog weight 3.0 NaN + height NaN 4.0 + >>> df_multi_level_cols2.stack([0, 1], future_stack=True) + cat weight kg 1.0 + height m 2.0 + dog weight kg 3.0 + height m 4.0 + dtype: float64 + """ + if not future_stack: + from pandas.core.reshape.reshape import ( + stack, + stack_multiple, + ) + + if ( + dropna is not lib.no_default + or sort is not lib.no_default + or self.columns.nlevels > 1 + ): + warnings.warn( + "The previous implementation of stack is deprecated and will be " + "removed in a future version of pandas. See the What's New notes " + "for pandas 2.1.0 for details. 
Specify future_stack=True to adopt " + "the new implementation and silence this warning.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if dropna is lib.no_default: + dropna = True + if sort is lib.no_default: + sort = True + + if isinstance(level, (tuple, list)): + result = stack_multiple(self, level, dropna=dropna, sort=sort) + else: + result = stack(self, level, dropna=dropna, sort=sort) + else: + from pandas.core.reshape.reshape import stack_v3 + + if dropna is not lib.no_default: + raise ValueError( + "dropna must be unspecified with future_stack=True as the new " + "implementation does not introduce rows of NA values. This " + "argument will be removed in a future version of pandas." + ) + + if sort is not lib.no_default: + raise ValueError( + "Cannot specify sort with future_stack=True, this argument will be " + "removed in a future version of pandas. Sort the result using " + ".sort_index instead." + ) + + if ( + isinstance(level, (tuple, list)) + and not all(lev in self.columns.names for lev in level) + and not all(isinstance(lev, int) for lev in level) + ): + raise ValueError( + "level should contain all level names or all level " + "numbers, not a mixture of the two." + ) + + if not isinstance(level, (tuple, list)): + level = [level] + level = [self.columns._get_level_number(lev) for lev in level] + result = stack_v3(self, level) + + return result.__finalize__(self, method="stack") + + def explode( + self, + column: IndexLabel, + ignore_index: bool = False, + ) -> DataFrame: + """ + Transform each element of a list-like to a row, replicating index values. + + Parameters + ---------- + column : IndexLabel + Column(s) to explode. + For multiple columns, specify a non-empty list with each element + be str or tuple, and all specified columns their list-like data + on same row of the frame must have matching length. + + .. versionadded:: 1.3.0 + Multi-column explode + + ignore_index : bool, default False + If True, the resulting index will be labeled 0, 1, …, n - 1. + + Returns + ------- + DataFrame + Exploded lists to rows of the subset columns; + index will be duplicated for these rows. + + Raises + ------ + ValueError : + * If columns of the frame are not unique. + * If specified columns to explode is empty list. + * If specified columns to explode have not matching count of + elements rowwise in the frame. + + See Also + -------- + DataFrame.unstack : Pivot a level of the (necessarily hierarchical) + index labels. + DataFrame.melt : Unpivot a DataFrame from wide format to long format. + Series.explode : Explode a DataFrame from list-like columns to long format. + + Notes + ----- + This routine will explode list-likes including lists, tuples, sets, + Series, and np.ndarray. The result dtype of the subset rows will + be object. Scalars will be returned unchanged, and empty list-likes will + result in a np.nan for that row. In addition, the ordering of rows in the + output will be non-deterministic when exploding sets. + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> df = pd.DataFrame({'A': [[0, 1, 2], 'foo', [], [3, 4]], + ... 'B': 1, + ... 'C': [['a', 'b', 'c'], np.nan, [], ['d', 'e']]}) + >>> df + A B C + 0 [0, 1, 2] 1 [a, b, c] + 1 foo 1 NaN + 2 [] 1 [] + 3 [3, 4] 1 [d, e] + + Single-column explode. + + >>> df.explode('A') + A B C + 0 0 1 [a, b, c] + 0 1 1 [a, b, c] + 0 2 1 [a, b, c] + 1 foo 1 NaN + 2 NaN 1 [] + 3 3 1 [d, e] + 3 4 1 [d, e] + + Multi-column explode. 
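+
+ All listed columns must hold list-likes of matching length row by
+ row, otherwise a ``ValueError`` is raised (a small sketch of our
+ own; ``bad`` is an illustrative name):
+
+ >>> bad = pd.DataFrame({'A': [[0, 1], [2]], 'C': [['a'], ['b', 'c']]})
+ >>> bad.explode(list('AC'))
+ Traceback (most recent call last):
+ ...
+ ValueError: columns must have matching element counts
+
+ With matching counts the multi-column form works as expected: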
+ + >>> df.explode(list('AC')) + A B C + 0 0 1 a + 0 1 1 b + 0 2 1 c + 1 foo 1 NaN + 2 NaN 1 NaN + 3 3 1 d + 3 4 1 e + """ + if not self.columns.is_unique: + duplicate_cols = self.columns[self.columns.duplicated()].tolist() + raise ValueError( + f"DataFrame columns must be unique. Duplicate columns: {duplicate_cols}" + ) + + columns: list[Hashable] + if is_scalar(column) or isinstance(column, tuple): + columns = [column] + elif isinstance(column, list) and all( + is_scalar(c) or isinstance(c, tuple) for c in column + ): + if not column: + raise ValueError("column must be nonempty") + if len(column) > len(set(column)): + raise ValueError("column must be unique") + columns = column + else: + raise ValueError("column must be a scalar, tuple, or list thereof") + + df = self.reset_index(drop=True) + if len(columns) == 1: + result = df[columns[0]].explode() + else: + mylen = lambda x: len(x) if (is_list_like(x) and len(x) > 0) else 1 + counts0 = self[columns[0]].apply(mylen) + for c in columns[1:]: + if not all(counts0 == self[c].apply(mylen)): + raise ValueError("columns must have matching element counts") + result = DataFrame({c: df[c].explode() for c in columns}) + result = df.drop(columns, axis=1).join(result) + if ignore_index: + result.index = default_index(len(result)) + else: + result.index = self.index.take(result.index) + result = result.reindex(columns=self.columns, copy=False) + + return result.__finalize__(self, method="explode") + + def unstack(self, level: IndexLabel = -1, fill_value=None, sort: bool = True): + """ + Pivot a level of the (necessarily hierarchical) index labels. + + Returns a DataFrame having a new level of column labels whose inner-most level + consists of the pivoted index labels. + + If the index is not a MultiIndex, the output will be a Series + (the analogue of stack when the columns are not a MultiIndex). + + Parameters + ---------- + level : int, str, or list of these, default -1 (last level) + Level(s) of index to unstack, can pass level name. + fill_value : int, str or dict + Replace NaN with this value if the unstack produces missing values. + sort : bool, default True + Sort the level(s) in the resulting MultiIndex columns. + + Returns + ------- + Series or DataFrame + + See Also + -------- + DataFrame.pivot : Pivot a table based on column values. + DataFrame.stack : Pivot a level of the column labels (inverse operation + from `unstack`). + + Notes + ----- + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), + ... 
('two', 'a'), ('two', 'b')]) + >>> s = pd.Series(np.arange(1.0, 5.0), index=index) + >>> s + one a 1.0 + b 2.0 + two a 3.0 + b 4.0 + dtype: float64 + + >>> s.unstack(level=-1) + a b + one 1.0 2.0 + two 3.0 4.0 + + >>> s.unstack(level=0) + one two + a 1.0 3.0 + b 2.0 4.0 + + >>> df = s.unstack(level=0) + >>> df.unstack() + one a 1.0 + b 2.0 + two a 3.0 + b 4.0 + dtype: float64 + """ + from pandas.core.reshape.reshape import unstack + + result = unstack(self, level, fill_value, sort) + + return result.__finalize__(self, method="unstack") + + @Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"}) + def melt( + self, + id_vars=None, + value_vars=None, + var_name=None, + value_name: Hashable = "value", + col_level: Level | None = None, + ignore_index: bool = True, + ) -> DataFrame: + return melt( + self, + id_vars=id_vars, + value_vars=value_vars, + var_name=var_name, + value_name=value_name, + col_level=col_level, + ignore_index=ignore_index, + ).__finalize__(self, method="melt") + + # ---------------------------------------------------------------------- + # Time series-related + + @doc( + Series.diff, + klass="DataFrame", + extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n " + "Take difference over rows (0) or columns (1).\n", + other_klass="Series", + examples=dedent( + """ + Difference with previous row + + >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], + ... 'b': [1, 1, 2, 3, 5, 8], + ... 'c': [1, 4, 9, 16, 25, 36]}) + >>> df + a b c + 0 1 1 1 + 1 2 1 4 + 2 3 2 9 + 3 4 3 16 + 4 5 5 25 + 5 6 8 36 + + >>> df.diff() + a b c + 0 NaN NaN NaN + 1 1.0 0.0 3.0 + 2 1.0 1.0 5.0 + 3 1.0 1.0 7.0 + 4 1.0 2.0 9.0 + 5 1.0 3.0 11.0 + + Difference with previous column + + >>> df.diff(axis=1) + a b c + 0 NaN 0 0 + 1 NaN -1 3 + 2 NaN -1 7 + 3 NaN -1 13 + 4 NaN 0 20 + 5 NaN 2 28 + + Difference with 3rd previous row + + >>> df.diff(periods=3) + a b c + 0 NaN NaN NaN + 1 NaN NaN NaN + 2 NaN NaN NaN + 3 3.0 2.0 15.0 + 4 3.0 4.0 21.0 + 5 3.0 6.0 27.0 + + Difference with following row + + >>> df.diff(periods=-1) + a b c + 0 -1.0 0.0 -3.0 + 1 -1.0 -1.0 -5.0 + 2 -1.0 -1.0 -7.0 + 3 -1.0 -2.0 -9.0 + 4 -1.0 -3.0 -11.0 + 5 NaN NaN NaN + + Overflow in input dtype + + >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8) + >>> df.diff() + a + 0 NaN + 1 255.0""" + ), + ) + def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame: + if not lib.is_integer(periods): + if not (is_float(periods) and periods.is_integer()): + raise ValueError("periods must be an integer") + periods = int(periods) + + axis = self._get_axis_number(axis) + if axis == 1: + if periods != 0: + # in the periods == 0 case, this is equivalent diff of 0 periods + # along axis=0, and the Manager method may be somewhat more + # performant, so we dispatch in that case. + return self - self.shift(periods, axis=axis) + # With periods=0 this is equivalent to a diff with axis=0 + axis = 0 + + new_data = self._mgr.diff(n=periods) + res_df = self._constructor_from_mgr(new_data, axes=new_data.axes) + return res_df.__finalize__(self, "diff") + + # ---------------------------------------------------------------------- + # Function application + + def _gotitem( + self, + key: IndexLabel, + ndim: int, + subset: DataFrame | Series | None = None, + ) -> DataFrame | Series: + """ + Sub-classes to define. Return a sliced object. 
+ + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + if subset is None: + subset = self + elif subset.ndim == 1: # is Series + return subset + + # TODO: _shallow_copy(subset)? + return subset[key] + + _agg_see_also_doc = dedent( + """ + See Also + -------- + DataFrame.apply : Perform any type of operations. + DataFrame.transform : Perform transformation type operations. + pandas.DataFrame.groupby : Perform operations over groups. + pandas.DataFrame.resample : Perform operations over resampled bins. + pandas.DataFrame.rolling : Perform operations over rolling window. + pandas.DataFrame.expanding : Perform operations over expanding window. + pandas.core.window.ewm.ExponentialMovingWindow : Perform operation over exponential + weighted window. + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> df = pd.DataFrame([[1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9], + ... [np.nan, np.nan, np.nan]], + ... columns=['A', 'B', 'C']) + + Aggregate these functions over the rows. + + >>> df.agg(['sum', 'min']) + A B C + sum 12.0 15.0 18.0 + min 1.0 2.0 3.0 + + Different aggregations per column. + + >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) + A B + sum 12.0 NaN + min 1.0 2.0 + max NaN 8.0 + + Aggregate different functions over the columns and rename the index of the resulting + DataFrame. + + >>> df.agg(x=('A', 'max'), y=('B', 'min'), z=('C', 'mean')) + A B C + x 7.0 NaN NaN + y NaN 2.0 NaN + z NaN NaN 6.0 + + Aggregate over the columns. + + >>> df.agg("mean", axis="columns") + 0 2.0 + 1 5.0 + 2 8.0 + 3 NaN + dtype: float64 + """ + ) + + @doc( + _shared_docs["aggregate"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + ) + def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): + from pandas.core.apply import frame_apply + + axis = self._get_axis_number(axis) + + op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) + result = op.agg() + result = reconstruct_and_relabel_result(result, func, **kwargs) + return result + + agg = aggregate + + @doc( + _shared_docs["transform"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + ) + def transform( + self, func: AggFuncType, axis: Axis = 0, *args, **kwargs + ) -> DataFrame: + from pandas.core.apply import frame_apply + + op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) + result = op.transform() + assert isinstance(result, DataFrame) + return result + + def apply( + self, + func: AggFuncType, + axis: Axis = 0, + raw: bool = False, + result_type: Literal["expand", "reduce", "broadcast"] | None = None, + args=(), + by_row: Literal[False, "compat"] = "compat", + engine: Literal["python", "numba"] = "python", + engine_kwargs: dict[str, bool] | None = None, + **kwargs, + ): + """ + Apply a function along an axis of the DataFrame. + + Objects passed to the function are Series objects whose index is + either the DataFrame's index (``axis=0``) or the DataFrame's columns + (``axis=1``). By default (``result_type=None``), the final return type + is inferred from the return type of the applied function. Otherwise, + it depends on the `result_type` argument. + + Parameters + ---------- + func : function + Function to apply to each column or row. 
+ axis : {0 or 'index', 1 or 'columns'}, default 0 + Axis along which the function is applied: + + * 0 or 'index': apply function to each column. + * 1 or 'columns': apply function to each row. + + raw : bool, default False + Determines if row or column is passed as a Series or ndarray object: + + * ``False`` : passes each row or column as a Series to the + function. + * ``True`` : the passed function will receive ndarray objects + instead. + If you are just applying a NumPy reduction function this will + achieve much better performance. + + result_type : {'expand', 'reduce', 'broadcast', None}, default None + These only act when ``axis=1`` (columns): + + * 'expand' : list-like results will be turned into columns. + * 'reduce' : returns a Series if possible rather than expanding + list-like results. This is the opposite of 'expand'. + * 'broadcast' : results will be broadcast to the original shape + of the DataFrame, the original index and columns will be + retained. + + The default behaviour (None) depends on the return value of the + applied function: list-like results will be returned as a Series + of those. However if the apply function returns a Series these + are expanded to columns. + args : tuple + Positional arguments to pass to `func` in addition to the + array/series. + by_row : False or "compat", default "compat" + Only has an effect when ``func`` is a listlike or dictlike of funcs + and the func isn't a string. + If "compat", will if possible first translate the func into pandas + methods (e.g. ``Series().apply(np.sum)`` will be translated to + ``Series().sum()``). If that doesn't work, will try call to apply again with + ``by_row=True`` and if that fails, will call apply again with + ``by_row=False`` (backward compatible). + If False, the funcs will be passed the whole Series at once. + + .. versionadded:: 2.1.0 + + engine : {'python', 'numba'}, default 'python' + Choose between the python (default) engine or the numba engine in apply. + + The numba engine will attempt to JIT compile the passed function, + which may result in speedups for large DataFrames. + It also supports the following engine_kwargs : + + - nopython (compile the function in nopython mode) + - nogil (release the GIL inside the JIT compiled function) + - parallel (try to apply the function in parallel over the DataFrame) + + Note: Due to limitations within numba/how pandas interfaces with numba, + you should only use this if raw=True + + Note: The numba compiler only supports a subset of + valid Python/numpy operations. + + Please read more about the `supported python features + `_ + and `supported numpy features + `_ + in numba to learn what you can or cannot use in the passed function. + + .. versionadded:: 2.2.0 + + engine_kwargs : dict + Pass keyword arguments to the engine. + This is currently only used by the numba engine, + see the documentation for the engine argument for more information. + **kwargs + Additional keyword arguments to pass as keywords arguments to + `func`. + + Returns + ------- + Series or DataFrame + Result of applying ``func`` along the given axis of the + DataFrame. + + See Also + -------- + DataFrame.map: For elementwise operations. + DataFrame.aggregate: Only perform aggregating type operations. + DataFrame.transform: Only perform transforming type operations. + + Notes + ----- + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. 
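+
+ As a quick sketch of the ``raw`` distinction (an illustration of
+ our own), the function receives a Series by default and an
+ ndarray with ``raw=True``:
+
+ >>> pd.DataFrame({'x': [1, 2]}).apply(lambda c: type(c).__name__)
+ x    Series
+ dtype: object
+ >>> pd.DataFrame({'x': [1, 2]}).apply(lambda c: type(c).__name__, raw=True)
+ x    ndarray
+ dtype: object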
+ + Examples + -------- + >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) + >>> df + A B + 0 4 9 + 1 4 9 + 2 4 9 + + Using a numpy universal function (in this case the same as + ``np.sqrt(df)``): + + >>> df.apply(np.sqrt) + A B + 0 2.0 3.0 + 1 2.0 3.0 + 2 2.0 3.0 + + Using a reducing function on either axis + + >>> df.apply(np.sum, axis=0) + A 12 + B 27 + dtype: int64 + + >>> df.apply(np.sum, axis=1) + 0 13 + 1 13 + 2 13 + dtype: int64 + + Returning a list-like will result in a Series + + >>> df.apply(lambda x: [1, 2], axis=1) + 0 [1, 2] + 1 [1, 2] + 2 [1, 2] + dtype: object + + Passing ``result_type='expand'`` will expand list-like results + to columns of a Dataframe + + >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') + 0 1 + 0 1 2 + 1 1 2 + 2 1 2 + + Returning a Series inside the function is similar to passing + ``result_type='expand'``. The resulting column names + will be the Series index. + + >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) + foo bar + 0 1 2 + 1 1 2 + 2 1 2 + + Passing ``result_type='broadcast'`` will ensure the same shape + result, whether list-like or scalar is returned by the function, + and broadcast it along the axis. The resulting column names will + be the originals. + + >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') + A B + 0 1 2 + 1 1 2 + 2 1 2 + """ + from pandas.core.apply import frame_apply + + op = frame_apply( + self, + func=func, + axis=axis, + raw=raw, + result_type=result_type, + by_row=by_row, + engine=engine, + engine_kwargs=engine_kwargs, + args=args, + kwargs=kwargs, + ) + return op.apply().__finalize__(self, method="apply") + + def map( + self, func: PythonFuncType, na_action: str | None = None, **kwargs + ) -> DataFrame: + """ + Apply a function to a Dataframe elementwise. + + .. versionadded:: 2.1.0 + + DataFrame.applymap was deprecated and renamed to DataFrame.map. + + This method applies a function that accepts and returns a scalar + to every element of a DataFrame. + + Parameters + ---------- + func : callable + Python function, returns a single value from a single value. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NaN values, without passing them to func. + **kwargs + Additional keyword arguments to pass as keywords arguments to + `func`. + + Returns + ------- + DataFrame + Transformed DataFrame. + + See Also + -------- + DataFrame.apply : Apply a function along input axis of DataFrame. + DataFrame.replace: Replace values given in `to_replace` with `value`. + Series.map : Apply a function elementwise on a Series. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) + >>> df + 0 1 + 0 1.000 2.120 + 1 3.356 4.567 + + >>> df.map(lambda x: len(str(x))) + 0 1 + 0 3 4 + 1 5 5 + + Like Series.map, NA values can be ignored: + + >>> df_copy = df.copy() + >>> df_copy.iloc[0, 0] = pd.NA + >>> df_copy.map(lambda x: len(str(x)), na_action='ignore') + 0 1 + 0 NaN 4 + 1 5.0 5 + + It is also possible to use `map` with functions that are not + `lambda` functions: + + >>> df.map(round, ndigits=1) + 0 1 + 0 1.0 2.1 + 1 3.4 4.6 + + Note that a vectorized version of `func` often exists, which will + be much faster. You could square each number elementwise. + + >>> df.map(lambda x: x**2) + 0 1 + 0 1.000000 4.494400 + 1 11.262736 20.857489 + + But it's better to avoid map in that case. 
+ + >>> df ** 2 + 0 1 + 0 1.000000 4.494400 + 1 11.262736 20.857489 + """ + if na_action not in {"ignore", None}: + raise ValueError( + f"na_action must be 'ignore' or None. Got {repr(na_action)}" + ) + + if self.empty: + return self.copy() + + func = functools.partial(func, **kwargs) + + def infer(x): + return x._map_values(func, na_action=na_action) + + return self.apply(infer).__finalize__(self, "map") + + def applymap( + self, func: PythonFuncType, na_action: NaAction | None = None, **kwargs + ) -> DataFrame: + """ + Apply a function to a Dataframe elementwise. + + .. deprecated:: 2.1.0 + + DataFrame.applymap has been deprecated. Use DataFrame.map instead. + + This method applies a function that accepts and returns a scalar + to every element of a DataFrame. + + Parameters + ---------- + func : callable + Python function, returns a single value from a single value. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NaN values, without passing them to func. + **kwargs + Additional keyword arguments to pass as keywords arguments to + `func`. + + Returns + ------- + DataFrame + Transformed DataFrame. + + See Also + -------- + DataFrame.apply : Apply a function along input axis of DataFrame. + DataFrame.map : Apply a function along input axis of DataFrame. + DataFrame.replace: Replace values given in `to_replace` with `value`. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) + >>> df + 0 1 + 0 1.000 2.120 + 1 3.356 4.567 + + >>> df.map(lambda x: len(str(x))) + 0 1 + 0 3 4 + 1 5 5 + """ + warnings.warn( + "DataFrame.applymap has been deprecated. Use DataFrame.map instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.map(func, na_action=na_action, **kwargs) + + # ---------------------------------------------------------------------- + # Merging / joining methods + + def _append( + self, + other, + ignore_index: bool = False, + verify_integrity: bool = False, + sort: bool = False, + ) -> DataFrame: + if isinstance(other, (Series, dict)): + if isinstance(other, dict): + if not ignore_index: + raise TypeError("Can only append a dict if ignore_index=True") + other = Series(other) + if other.name is None and not ignore_index: + raise TypeError( + "Can only append a Series if ignore_index=True " + "or if the Series has a name" + ) + + index = Index( + [other.name], + name=self.index.names + if isinstance(self.index, MultiIndex) + else self.index.name, + ) + row_df = other.to_frame().T + # infer_objects is needed for + # test_append_empty_frame_to_series_with_dateutil_tz + other = row_df.infer_objects(copy=False).rename_axis( + index.names, copy=False + ) + elif isinstance(other, list): + if not other: + pass + elif not isinstance(other[0], DataFrame): + other = DataFrame(other) + if self.index.name is not None and not ignore_index: + other.index.name = self.index.name + + from pandas.core.reshape.concat import concat + + if isinstance(other, (list, tuple)): + to_concat = [self, *other] + else: + to_concat = [self, other] + + result = concat( + to_concat, + ignore_index=ignore_index, + verify_integrity=verify_integrity, + sort=sort, + ) + return result.__finalize__(self, method="append") + + def join( + self, + other: DataFrame | Series | Iterable[DataFrame | Series], + on: IndexLabel | None = None, + how: MergeHow = "left", + lsuffix: str = "", + rsuffix: str = "", + sort: bool = False, + validate: JoinValidate | None = None, + ) -> DataFrame: + """ + Join columns of another DataFrame. 
+ + Join columns with `other` DataFrame either on index or on a key + column. Efficiently join multiple DataFrame objects by index at once by + passing a list. + + Parameters + ---------- + other : DataFrame, Series, or a list containing any combination of them + Index should be similar to one of the columns in this one. If a + Series is passed, its name attribute must be set, and that will be + used as the column name in the resulting joined DataFrame. + on : str, list of str, or array-like, optional + Column or index level name(s) in the caller to join on the index + in `other`, otherwise joins index-on-index. If multiple + values given, the `other` DataFrame must have a MultiIndex. Can + pass an array as the join key if it is not already contained in + the calling DataFrame. Like an Excel VLOOKUP operation. + how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'left' + How to handle the operation of the two objects. + + * left: use calling frame's index (or column if on is specified) + * right: use `other`'s index. + * outer: form union of calling frame's index (or column if on is + specified) with `other`'s index, and sort it lexicographically. + * inner: form intersection of calling frame's index (or column if + on is specified) with `other`'s index, preserving the order + of the calling's one. + * cross: creates the cartesian product from both frames, preserves the order + of the left keys. + lsuffix : str, default '' + Suffix to use from left frame's overlapping columns. + rsuffix : str, default '' + Suffix to use from right frame's overlapping columns. + sort : bool, default False + Order result DataFrame lexicographically by the join key. If False, + the order of the join key depends on the join type (how keyword). + validate : str, optional + If specified, checks if join is of specified type. + + * "one_to_one" or "1:1": check if join keys are unique in both left + and right datasets. + * "one_to_many" or "1:m": check if join keys are unique in left dataset. + * "many_to_one" or "m:1": check if join keys are unique in right dataset. + * "many_to_many" or "m:m": allowed, but does not result in checks. + + .. versionadded:: 1.5.0 + + Returns + ------- + DataFrame + A dataframe containing columns from both the caller and `other`. + + See Also + -------- + DataFrame.merge : For column(s)-on-column(s) operations. + + Notes + ----- + Parameters `on`, `lsuffix`, and `rsuffix` are not supported when + passing a list of `DataFrame` objects. + + Examples + -------- + >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], + ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) + + >>> df + key A + 0 K0 A0 + 1 K1 A1 + 2 K2 A2 + 3 K3 A3 + 4 K4 A4 + 5 K5 A5 + + >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], + ... 'B': ['B0', 'B1', 'B2']}) + + >>> other + key B + 0 K0 B0 + 1 K1 B1 + 2 K2 B2 + + Join DataFrames using their indexes. + + >>> df.join(other, lsuffix='_caller', rsuffix='_other') + key_caller A key_other B + 0 K0 A0 K0 B0 + 1 K1 A1 K1 B1 + 2 K2 A2 K2 B2 + 3 K3 A3 NaN NaN + 4 K4 A4 NaN NaN + 5 K5 A5 NaN NaN + + If we want to join using the key columns, we need to set key to be + the index in both `df` and `other`. The joined DataFrame will have + key as its index. + + >>> df.set_index('key').join(other.set_index('key')) + A B + key + K0 A0 B0 + K1 A1 B1 + K2 A2 B2 + K3 A3 NaN + K4 A4 NaN + K5 A5 NaN + + Another option to join using the key columns is to use the `on` + parameter. DataFrame.join always uses `other`'s index but we can use + any column in `df`. 
This method preserves the original DataFrame's + index in the result. + + >>> df.join(other.set_index('key'), on='key') + key A B + 0 K0 A0 B0 + 1 K1 A1 B1 + 2 K2 A2 B2 + 3 K3 A3 NaN + 4 K4 A4 NaN + 5 K5 A5 NaN + + Using non-unique key values shows how they are matched. + + >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K1', 'K3', 'K0', 'K1'], + ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) + + >>> df + key A + 0 K0 A0 + 1 K1 A1 + 2 K1 A2 + 3 K3 A3 + 4 K0 A4 + 5 K1 A5 + + >>> df.join(other.set_index('key'), on='key', validate='m:1') + key A B + 0 K0 A0 B0 + 1 K1 A1 B1 + 2 K1 A2 B1 + 3 K3 A3 NaN + 4 K0 A4 B0 + 5 K1 A5 B1 + """ + from pandas.core.reshape.concat import concat + from pandas.core.reshape.merge import merge + + if isinstance(other, Series): + if other.name is None: + raise ValueError("Other Series must have a name") + other = DataFrame({other.name: other}) + + if isinstance(other, DataFrame): + if how == "cross": + return merge( + self, + other, + how=how, + on=on, + suffixes=(lsuffix, rsuffix), + sort=sort, + validate=validate, + ) + return merge( + self, + other, + left_on=on, + how=how, + left_index=on is None, + right_index=True, + suffixes=(lsuffix, rsuffix), + sort=sort, + validate=validate, + ) + else: + if on is not None: + raise ValueError( + "Joining multiple DataFrames only supported for joining on index" + ) + + if rsuffix or lsuffix: + raise ValueError( + "Suffixes not supported when joining multiple DataFrames" + ) + + # Mypy thinks the RHS is a + # "Union[DataFrame, Series, Iterable[Union[DataFrame, Series]]]" whereas + # the LHS is an "Iterable[DataFrame]", but in reality both types are + # "Iterable[Union[DataFrame, Series]]" due to the if statements + frames = [cast("DataFrame | Series", self)] + list(other) + + can_concat = all(df.index.is_unique for df in frames) + + # join indexes only using concat + if can_concat: + if how == "left": + res = concat( + frames, axis=1, join="outer", verify_integrity=True, sort=sort + ) + return res.reindex(self.index, copy=False) + else: + return concat( + frames, axis=1, join=how, verify_integrity=True, sort=sort + ) + + joined = frames[0] + + for frame in frames[1:]: + joined = merge( + joined, + frame, + how=how, + left_index=True, + right_index=True, + validate=validate, + ) + + return joined + + @Substitution("") + @Appender(_merge_doc, indents=2) + def merge( + self, + right: DataFrame | Series, + how: MergeHow = "inner", + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | AnyArrayLike | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = False, + suffixes: Suffixes = ("_x", "_y"), + copy: bool | None = None, + indicator: str | bool = False, + validate: MergeValidate | None = None, + ) -> DataFrame: + from pandas.core.reshape.merge import merge + + return merge( + self, + right, + how=how, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + copy=copy, + indicator=indicator, + validate=validate, + ) + + def round( + self, decimals: int | dict[IndexLabel, int] | Series = 0, *args, **kwargs + ) -> DataFrame: + """ + Round a DataFrame to a variable number of decimal places. + + Parameters + ---------- + decimals : int, dict, Series + Number of decimal places to round each column to. If an int is + given, round each column to the same number of places. + Otherwise dict and Series round to variable numbers of places. 
+ Column names should be in the keys if `decimals` is a + dict-like, or in the index if `decimals` is a Series. Any + columns not included in `decimals` will be left as is. Elements + of `decimals` which are not columns of the input will be + ignored. + *args + Additional keywords have no effect but might be accepted for + compatibility with numpy. + **kwargs + Additional keywords have no effect but might be accepted for + compatibility with numpy. + + Returns + ------- + DataFrame + A DataFrame with the affected columns rounded to the specified + number of decimal places. + + See Also + -------- + numpy.around : Round a numpy array to the given number of decimals. + Series.round : Round a Series to the given number of decimals. + + Examples + -------- + >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], + ... columns=['dogs', 'cats']) + >>> df + dogs cats + 0 0.21 0.32 + 1 0.01 0.67 + 2 0.66 0.03 + 3 0.21 0.18 + + By providing an integer each column is rounded to the same number + of decimal places + + >>> df.round(1) + dogs cats + 0 0.2 0.3 + 1 0.0 0.7 + 2 0.7 0.0 + 3 0.2 0.2 + + With a dict, the number of places for specific columns can be + specified with the column names as key and the number of decimal + places as value + + >>> df.round({'dogs': 1, 'cats': 0}) + dogs cats + 0 0.2 0.0 + 1 0.0 1.0 + 2 0.7 0.0 + 3 0.2 0.0 + + Using a Series, the number of places for specific columns can be + specified with the column names as index and the number of + decimal places as value + + >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) + >>> df.round(decimals) + dogs cats + 0 0.2 0.0 + 1 0.0 1.0 + 2 0.7 0.0 + 3 0.2 0.0 + """ + from pandas.core.reshape.concat import concat + + def _dict_round(df: DataFrame, decimals): + for col, vals in df.items(): + try: + yield _series_round(vals, decimals[col]) + except KeyError: + yield vals + + def _series_round(ser: Series, decimals: int) -> Series: + if is_integer_dtype(ser.dtype) or is_float_dtype(ser.dtype): + return ser.round(decimals) + return ser + + nv.validate_round(args, kwargs) + + if isinstance(decimals, (dict, Series)): + if isinstance(decimals, Series) and not decimals.index.is_unique: + raise ValueError("Index of decimals must be unique") + if is_dict_like(decimals) and not all( + is_integer(value) for _, value in decimals.items() + ): + raise TypeError("Values in decimals must be integers") + new_cols = list(_dict_round(self, decimals)) + elif is_integer(decimals): + # Dispatch to Block.round + # Argument "decimals" to "round" of "BaseBlockManager" has incompatible + # type "Union[int, integer[Any]]"; expected "int" + new_mgr = self._mgr.round( + decimals=decimals, # type: ignore[arg-type] + using_cow=using_copy_on_write(), + ) + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__( + self, method="round" + ) + else: + raise TypeError("decimals must be an integer, a dict-like or a Series") + + if new_cols is not None and len(new_cols) > 0: + return self._constructor( + concat(new_cols, axis=1), index=self.index, columns=self.columns + ).__finalize__(self, method="round") + else: + return self.copy(deep=False) + + # ---------------------------------------------------------------------- + # Statistical methods, etc. + + def corr( + self, + method: CorrelationMethod = "pearson", + min_periods: int = 1, + numeric_only: bool = False, + ) -> DataFrame: + """ + Compute pairwise correlation of columns, excluding NA/null values. 
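+
+ (A minimal sketch of the default Pearson behaviour, with data of
+ our own:)
+
+ >>> pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3]}).corr()
+      x    y
+ x  1.0  1.0
+ y  1.0  1.0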
+
+        Parameters
+        ----------
+        method : {'pearson', 'kendall', 'spearman'} or callable
+            Method of correlation:
+
+            * pearson : standard correlation coefficient
+            * kendall : Kendall Tau correlation coefficient
+            * spearman : Spearman rank correlation
+            * callable: callable with input two 1d ndarrays
+                and returning a float. Note that the returned matrix from corr
+                will have 1 along the diagonals and will be symmetric
+                regardless of the callable's behavior.
+        min_periods : int, optional
+            Minimum number of observations required per pair of columns
+            to have a valid result. Currently only available for Pearson
+            and Spearman correlation.
+        numeric_only : bool, default False
+            Include only `float`, `int` or `boolean` data.
+
+            .. versionadded:: 1.5.0
+
+            .. versionchanged:: 2.0.0
+                The default value of ``numeric_only`` is now ``False``.
+
+        Returns
+        -------
+        DataFrame
+            Correlation matrix.
+
+        See Also
+        --------
+        DataFrame.corrwith : Compute pairwise correlation with another
+            DataFrame or Series.
+        Series.corr : Compute the correlation between two Series.
+
+        Notes
+        -----
+        Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations.
+
+        * `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
+        * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
+        * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
+
+        Examples
+        --------
+        >>> def histogram_intersection(a, b):
+        ...     v = np.minimum(a, b).sum().round(decimals=1)
+        ...     return v
+        >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
+        ...                   columns=['dogs', 'cats'])
+        >>> df.corr(method=histogram_intersection)
+              dogs  cats
+        dogs   1.0   0.3
+        cats   0.3   1.0
+
+        >>> df = pd.DataFrame([(1, 1), (2, np.nan), (np.nan, 3), (4, 4)],
+        ...                   columns=['dogs', 'cats'])
+        >>> df.corr(min_periods=3)
+              dogs  cats
+        dogs   1.0   NaN
+        cats   NaN   1.0
+        """  # noqa: E501
+        data = self._get_numeric_data() if numeric_only else self
+        cols = data.columns
+        idx = cols.copy()
+        mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
+
+        if method == "pearson":
+            correl = libalgos.nancorr(mat, minp=min_periods)
+        elif method == "spearman":
+            correl = libalgos.nancorr_spearman(mat, minp=min_periods)
+        elif method == "kendall" or callable(method):
+            if min_periods is None:
+                min_periods = 1
+            mat = mat.T
+            corrf = nanops.get_corr_func(method)
+            K = len(cols)
+            correl = np.empty((K, K), dtype=float)
+            mask = np.isfinite(mat)
+            for i, ac in enumerate(mat):
+                for j, bc in enumerate(mat):
+                    if i > j:
+                        continue
+
+                    valid = mask[i] & mask[j]
+                    if valid.sum() < min_periods:
+                        c = np.nan
+                    elif i == j:
+                        c = 1.0
+                    elif not valid.all():
+                        c = corrf(ac[valid], bc[valid])
+                    else:
+                        c = corrf(ac, bc)
+                    correl[i, j] = c
+                    correl[j, i] = c
+        else:
+            raise ValueError(
+                "method must be either 'pearson', "
+                "'spearman', 'kendall', or a callable, "
+                f"'{method}' was supplied"
+            )
+
+        result = self._constructor(correl, index=idx, columns=cols, copy=False)
+        return result.__finalize__(self, method="corr")
+
+    def cov(
+        self,
+        min_periods: int | None = None,
+        ddof: int | None = 1,
+        numeric_only: bool = False,
+    ) -> DataFrame:
+        """
+        Compute pairwise covariance of columns, excluding NA/null values.
+
+        Compute the pairwise covariance among the series of a DataFrame.
+        The returned data frame is the `covariance matrix
+        <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
+        of the DataFrame.
+
+        Both NA and null values are automatically excluded from the
+        calculation. (See the note below about bias from missing values.)
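+
+        As a minimal worked check (editor's sketch, not part of the upstream
+        docstring): each entry is
+        ``sum((x - mean(x)) * (y - mean(y))) / (N - ddof)`` over the paired
+        non-missing observations, so for two exactly proportional columns:
+
+        >>> df = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [2.0, 4.0, 6.0]})
+        >>> float(df.cov().loc['x', 'y'])  # (2 + 0 + 2) / (3 - 1)
+        2.0
+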
+        A threshold can be set for the minimum number of
+        observations for each value created. Comparisons with observations
+        below this threshold will be returned as ``NaN``.
+
+        This method is generally used for the analysis of time series data to
+        understand the relationship between different measures
+        across time.
+
+        Parameters
+        ----------
+        min_periods : int, optional
+            Minimum number of observations required per pair of columns
+            to have a valid result.
+
+        ddof : int, default 1
+            Delta degrees of freedom. The divisor used in calculations
+            is ``N - ddof``, where ``N`` represents the number of elements.
+            This argument is applicable only when no ``nan`` is in the
+            DataFrame.
+
+        numeric_only : bool, default False
+            Include only `float`, `int` or `boolean` data.
+
+            .. versionadded:: 1.5.0
+
+            .. versionchanged:: 2.0.0
+                The default value of ``numeric_only`` is now ``False``.
+
+        Returns
+        -------
+        DataFrame
+            The covariance matrix of the series of the DataFrame.
+
+        See Also
+        --------
+        Series.cov : Compute covariance with another Series.
+        core.window.ewm.ExponentialMovingWindow.cov : Exponential weighted sample
+            covariance.
+        core.window.expanding.Expanding.cov : Expanding sample covariance.
+        core.window.rolling.Rolling.cov : Rolling sample covariance.
+
+        Notes
+        -----
+        Returns the covariance matrix of the DataFrame's time series.
+        The covariance is normalized by N-ddof.
+
+        For DataFrames that have Series that are missing data (assuming that
+        data is `missing at random
+        <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
+        the returned covariance matrix will be an unbiased estimate
+        of the variance and covariance between the member Series.
+
+        However, for many applications this estimate may not be acceptable
+        because the estimated covariance matrix is not guaranteed to be
+        positive semi-definite. This could lead to estimated correlations
+        having absolute values greater than one, and/or a non-invertible
+        covariance matrix. See `Estimation of covariance matrices
+        <https://en.wikipedia.org/wiki/Estimation_of_covariance_matrices>`__
+        for more details.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
+        ...                   columns=['dogs', 'cats'])
+        >>> df.cov()
+                  dogs      cats
+        dogs  0.666667 -1.000000
+        cats -1.000000  1.666667
+
+        >>> np.random.seed(42)
+        >>> df = pd.DataFrame(np.random.randn(1000, 5),
+        ...                   columns=['a', 'b', 'c', 'd', 'e'])
+        >>> df.cov()
+                  a         b         c         d         e
+        a  0.998438 -0.020161  0.059277 -0.008943  0.014144
+        b -0.020161  1.059352 -0.008543 -0.024738  0.009826
+        c  0.059277 -0.008543  1.010670 -0.001486 -0.000271
+        d -0.008943 -0.024738 -0.001486  0.921297 -0.013692
+        e  0.014144  0.009826 -0.000271 -0.013692  0.977795
+
+        **Minimum number of periods**
+
+        This method also supports an optional ``min_periods`` keyword
+        that specifies the required minimum number of non-NA observations for
+        each column pair in order to have a valid result:
+
+        >>> np.random.seed(42)
+        >>> df = pd.DataFrame(np.random.randn(20, 3),
+        ...
columns=['a', 'b', 'c']) + >>> df.loc[df.index[:5], 'a'] = np.nan + >>> df.loc[df.index[5:10], 'b'] = np.nan + >>> df.cov(min_periods=12) + a b c + a 0.316741 NaN -0.150812 + b NaN 1.248003 0.191417 + c -0.150812 0.191417 0.895202 + """ + data = self._get_numeric_data() if numeric_only else self + cols = data.columns + idx = cols.copy() + mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False) + + if notna(mat).all(): + if min_periods is not None and min_periods > len(mat): + base_cov = np.empty((mat.shape[1], mat.shape[1])) + base_cov.fill(np.nan) + else: + base_cov = np.cov(mat.T, ddof=ddof) + base_cov = base_cov.reshape((len(cols), len(cols))) + else: + base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods) + + result = self._constructor(base_cov, index=idx, columns=cols, copy=False) + return result.__finalize__(self, method="cov") + + def corrwith( + self, + other: DataFrame | Series, + axis: Axis = 0, + drop: bool = False, + method: CorrelationMethod = "pearson", + numeric_only: bool = False, + ) -> Series: + """ + Compute pairwise correlation. + + Pairwise correlation is computed between rows or columns of + DataFrame with rows or columns of Series or DataFrame. DataFrames + are first aligned along both axes before computing the + correlations. + + Parameters + ---------- + other : DataFrame, Series + Object with which to compute correlations. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to use. 0 or 'index' to compute row-wise, 1 or 'columns' for + column-wise. + drop : bool, default False + Drop missing indices from result. + method : {'pearson', 'kendall', 'spearman'} or callable + Method of correlation: + + * pearson : standard correlation coefficient + * kendall : Kendall Tau correlation coefficient + * spearman : Spearman rank correlation + * callable: callable with input two 1d ndarrays + and returning a float. + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + The default value of ``numeric_only`` is now ``False``. + + Returns + ------- + Series + Pairwise correlations. + + See Also + -------- + DataFrame.corr : Compute pairwise correlation of columns. 
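+
+        Notes
+        -----
+        (Editor's sketch, not upstream documentation.) With the default
+        ``axis=0``, matching columns are correlated pairwise after the two
+        objects are aligned on their shared labels:
+
+        >>> df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
+        >>> df2 = pd.DataFrame({'a': [3, 2, 1], 'b': [1, 2, 3]})
+        >>> df1.corrwith(df2)
+        a   -1.0
+        b    1.0
+        dtype: float64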
+ + Examples + -------- + >>> index = ["a", "b", "c", "d", "e"] + >>> columns = ["one", "two", "three", "four"] + >>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4), index=index, columns=columns) + >>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4), index=index[:4], columns=columns) + >>> df1.corrwith(df2) + one 1.0 + two 1.0 + three 1.0 + four 1.0 + dtype: float64 + + >>> df2.corrwith(df1, axis=1) + a 1.0 + b 1.0 + c 1.0 + d 1.0 + e NaN + dtype: float64 + """ # noqa: E501 + axis = self._get_axis_number(axis) + this = self._get_numeric_data() if numeric_only else self + + if isinstance(other, Series): + return this.apply(lambda x: other.corr(x, method=method), axis=axis) + + if numeric_only: + other = other._get_numeric_data() + left, right = this.align(other, join="inner", copy=False) + + if axis == 1: + left = left.T + right = right.T + + if method == "pearson": + # mask missing values + left = left + right * 0 + right = right + left * 0 + + # demeaned data + ldem = left - left.mean(numeric_only=numeric_only) + rdem = right - right.mean(numeric_only=numeric_only) + + num = (ldem * rdem).sum() + dom = ( + (left.count() - 1) + * left.std(numeric_only=numeric_only) + * right.std(numeric_only=numeric_only) + ) + + correl = num / dom + + elif method in ["kendall", "spearman"] or callable(method): + + def c(x): + return nanops.nancorr(x[0], x[1], method=method) + + correl = self._constructor_sliced( + map(c, zip(left.values.T, right.values.T)), + index=left.columns, + copy=False, + ) + + else: + raise ValueError( + f"Invalid method {method} was passed, " + "valid methods are: 'pearson', 'kendall', " + "'spearman', or callable" + ) + + if not drop: + # Find non-matching labels along the given axis + # and append missing correlations (GH 22375) + raxis: AxisInt = 1 if axis == 0 else 0 + result_index = this._get_axis(raxis).union(other._get_axis(raxis)) + idx_diff = result_index.difference(correl.index) + + if len(idx_diff) > 0: + correl = correl._append( + Series([np.nan] * len(idx_diff), index=idx_diff) + ) + + return correl + + # ---------------------------------------------------------------------- + # ndarray-like stats methods + + def count(self, axis: Axis = 0, numeric_only: bool = False): + """ + Count non-NA cells for each column or row. + + The values `None`, `NaN`, `NaT`, ``pandas.NA`` are considered NA. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + If 0 or 'index' counts are generated for each column. + If 1 or 'columns' counts are generated for each row. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + Returns + ------- + Series + For each column/row the number of non-NA/null entries. + + See Also + -------- + Series.count: Number of non-NA elements in a Series. + DataFrame.value_counts: Count unique combinations of columns. + DataFrame.shape: Number of DataFrame rows and columns (including NA + elements). + DataFrame.isna: Boolean same-sized DataFrame showing places of NA + elements. + + Examples + -------- + Constructing DataFrame from a dictionary: + + >>> df = pd.DataFrame({"Person": + ... ["John", "Myla", "Lewis", "John", "Myla"], + ... "Age": [24., np.nan, 21., 33, 26], + ... 
"Single": [False, True, True, True, False]}) + >>> df + Person Age Single + 0 John 24.0 False + 1 Myla NaN True + 2 Lewis 21.0 True + 3 John 33.0 True + 4 Myla 26.0 False + + Notice the uncounted NA values: + + >>> df.count() + Person 5 + Age 4 + Single 5 + dtype: int64 + + Counts for each **row**: + + >>> df.count(axis='columns') + 0 3 + 1 2 + 2 3 + 3 3 + 4 3 + dtype: int64 + """ + axis = self._get_axis_number(axis) + + if numeric_only: + frame = self._get_numeric_data() + else: + frame = self + + # GH #423 + if len(frame._get_axis(axis)) == 0: + result = self._constructor_sliced(0, index=frame._get_agg_axis(axis)) + else: + result = notna(frame).sum(axis=axis) + + return result.astype("int64", copy=False).__finalize__(self, method="count") + + def _reduce( + self, + op, + name: str, + *, + axis: Axis = 0, + skipna: bool = True, + numeric_only: bool = False, + filter_type=None, + **kwds, + ): + assert filter_type is None or filter_type == "bool", filter_type + out_dtype = "bool" if filter_type == "bool" else None + + if axis is not None: + axis = self._get_axis_number(axis) + + def func(values: np.ndarray): + # We only use this in the case that operates on self.values + return op(values, axis=axis, skipna=skipna, **kwds) + + dtype_has_keepdims: dict[ExtensionDtype, bool] = {} + + def blk_func(values, axis: Axis = 1): + if isinstance(values, ExtensionArray): + if not is_1d_only_ea_dtype(values.dtype) and not isinstance( + self._mgr, ArrayManager + ): + return values._reduce(name, axis=1, skipna=skipna, **kwds) + has_keepdims = dtype_has_keepdims.get(values.dtype) + if has_keepdims is None: + sign = signature(values._reduce) + has_keepdims = "keepdims" in sign.parameters + dtype_has_keepdims[values.dtype] = has_keepdims + if has_keepdims: + return values._reduce(name, skipna=skipna, keepdims=True, **kwds) + else: + warnings.warn( + f"{type(values)}._reduce will require a `keepdims` parameter " + "in the future", + FutureWarning, + stacklevel=find_stack_level(), + ) + result = values._reduce(name, skipna=skipna, **kwds) + return np.array([result]) + else: + return op(values, axis=axis, skipna=skipna, **kwds) + + def _get_data() -> DataFrame: + if filter_type is None: + data = self._get_numeric_data() + else: + # GH#25101, GH#24434 + assert filter_type == "bool" + data = self._get_bool_data() + return data + + # Case with EAs see GH#35881 + df = self + if numeric_only: + df = _get_data() + if axis is None: + dtype = find_common_type([arr.dtype for arr in df._mgr.arrays]) + if isinstance(dtype, ExtensionDtype): + df = df.astype(dtype, copy=False) + arr = concat_compat(list(df._iter_column_arrays())) + return arr._reduce(name, skipna=skipna, keepdims=False, **kwds) + return func(df.values) + elif axis == 1: + if len(df.index) == 0: + # Taking a transpose would result in no columns, losing the dtype. + # In the empty case, reducing along axis 0 or 1 gives the same + # result dtype, so reduce with axis=0 and ignore values + result = df._reduce( + op, + name, + axis=0, + skipna=skipna, + numeric_only=False, + filter_type=filter_type, + **kwds, + ).iloc[:0] + result.index = df.index + return result + + # kurtosis excluded since groupby does not implement it + if df.shape[1] and name != "kurt": + dtype = find_common_type([arr.dtype for arr in df._mgr.arrays]) + if isinstance(dtype, ExtensionDtype): + # GH 54341: fastpath for EA-backed axis=1 reductions + # This flattens the frame into a single 1D array while keeping + # track of the row and column indices of the original frame. 
Once + # flattened, grouping by the row indices and aggregating should + # be equivalent to transposing the original frame and aggregating + # with axis=0. + name = {"argmax": "idxmax", "argmin": "idxmin"}.get(name, name) + df = df.astype(dtype, copy=False) + arr = concat_compat(list(df._iter_column_arrays())) + nrows, ncols = df.shape + row_index = np.tile(np.arange(nrows), ncols) + col_index = np.repeat(np.arange(ncols), nrows) + ser = Series(arr, index=col_index, copy=False) + # GroupBy will raise a warning with SeriesGroupBy as the object, + # likely confusing users + with rewrite_warning( + target_message=( + f"The behavior of SeriesGroupBy.{name} with all-NA values" + ), + target_category=FutureWarning, + new_message=( + f"The behavior of {type(self).__name__}.{name} with all-NA " + "values, or any-NA and skipna=False, is deprecated. In " + "a future version this will raise ValueError" + ), + ): + result = ser.groupby(row_index).agg(name, **kwds) + result.index = df.index + if not skipna and name not in ("any", "all"): + mask = df.isna().to_numpy(dtype=np.bool_).any(axis=1) + other = -1 if name in ("idxmax", "idxmin") else lib.no_default + result = result.mask(mask, other) + return result + + df = df.T + + # After possibly _get_data and transposing, we are now in the + # simple case where we can use BlockManager.reduce + res = df._mgr.reduce(blk_func) + out = df._constructor_from_mgr(res, axes=res.axes).iloc[0] + if out_dtype is not None and out.dtype != "boolean": + out = out.astype(out_dtype) + elif (df._mgr.get_dtypes() == object).any() and name not in ["any", "all"]: + out = out.astype(object) + elif len(self) == 0 and out.dtype == object and name in ("sum", "prod"): + # Even if we are object dtype, follow numpy and return + # float64, see test_apply_funcs_over_empty + out = out.astype(np.float64) + + return out + + def _reduce_axis1(self, name: str, func, skipna: bool) -> Series: + """ + Special case for _reduce to try to avoid a potentially-expensive transpose. + + Apply the reduction block-wise along axis=1 and then reduce the resulting + 1D arrays. 
+ """ + if name == "all": + result = np.ones(len(self), dtype=bool) + ufunc = np.logical_and + elif name == "any": + result = np.zeros(len(self), dtype=bool) + # error: Incompatible types in assignment + # (expression has type "_UFunc_Nin2_Nout1[Literal['logical_or'], + # Literal[20], Literal[False]]", variable has type + # "_UFunc_Nin2_Nout1[Literal['logical_and'], Literal[20], + # Literal[True]]") + ufunc = np.logical_or # type: ignore[assignment] + else: + raise NotImplementedError(name) + + for arr in self._mgr.arrays: + middle = func(arr, axis=0, skipna=skipna) + result = ufunc(result, middle) + + res_ser = self._constructor_sliced(result, index=self.index, copy=False) + return res_ser + + @doc(make_doc("any", ndim=2)) + # error: Signature of "any" incompatible with supertype "NDFrame" + def any( # type: ignore[override] + self, + *, + axis: Axis | None = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> Series | bool: + result = self._logical_func( + "any", nanops.nanany, axis, bool_only, skipna, **kwargs + ) + if isinstance(result, Series): + result = result.__finalize__(self, method="any") + return result + + @doc(make_doc("all", ndim=2)) + def all( + self, + axis: Axis | None = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> Series | bool: + result = self._logical_func( + "all", nanops.nanall, axis, bool_only, skipna, **kwargs + ) + if isinstance(result, Series): + result = result.__finalize__(self, method="all") + return result + + @doc(make_doc("min", ndim=2)) + def min( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().min(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="min") + return result + + @doc(make_doc("max", ndim=2)) + def max( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().max(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="max") + return result + + @doc(make_doc("sum", ndim=2)) + def sum( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + result = super().sum(axis, skipna, numeric_only, min_count, **kwargs) + return result.__finalize__(self, method="sum") + + @doc(make_doc("prod", ndim=2)) + def prod( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + result = super().prod(axis, skipna, numeric_only, min_count, **kwargs) + return result.__finalize__(self, method="prod") + + @doc(make_doc("mean", ndim=2)) + def mean( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().mean(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="mean") + return result + + @doc(make_doc("median", ndim=2)) + def median( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().median(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="median") + return result + + @doc(make_doc("sem", ndim=2)) + def sem( + self, + axis: Axis | None = 0, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + result = super().sem(axis, skipna, ddof, 
numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="sem") + return result + + @doc(make_doc("var", ndim=2)) + def var( + self, + axis: Axis | None = 0, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + result = super().var(axis, skipna, ddof, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="var") + return result + + @doc(make_doc("std", ndim=2)) + def std( + self, + axis: Axis | None = 0, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + result = super().std(axis, skipna, ddof, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="std") + return result + + @doc(make_doc("skew", ndim=2)) + def skew( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().skew(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="skew") + return result + + @doc(make_doc("kurt", ndim=2)) + def kurt( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().kurt(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="kurt") + return result + + kurtosis = kurt + product = prod + + @doc(make_doc("cummin", ndim=2)) + def cummin(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummin(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cummax", ndim=2)) + def cummax(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummax(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumsum", ndim=2)) + def cumsum(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumprod", 2)) + def cumprod(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) + + def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series: + """ + Count number of distinct elements in specified axis. + + Return Series with number of distinct elements. Can ignore NaN + values. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for + column-wise. + dropna : bool, default True + Don't include NaN in the counts. + + Returns + ------- + Series + + See Also + -------- + Series.nunique: Method nunique for Series. + DataFrame.count: Count non-NA cells for each column or row. 
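+
+        Notes
+        -----
+        (Editor's sketch, not upstream documentation.) This is a column-wise
+        (or row-wise) ``apply`` of :meth:`Series.nunique`, so it agrees with
+        the per-column call:
+
+        >>> df = pd.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]})
+        >>> bool(df['B'].nunique() == df.nunique()['B'])
+        True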
+ + Examples + -------- + >>> df = pd.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]}) + >>> df.nunique() + A 3 + B 2 + dtype: int64 + + >>> df.nunique(axis=1) + 0 1 + 1 2 + 2 2 + dtype: int64 + """ + return self.apply(Series.nunique, axis=axis, dropna=dropna) + + @doc(_shared_docs["idxmin"], numeric_only_default="False") + def idxmin( + self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False + ) -> Series: + axis = self._get_axis_number(axis) + + if self.empty and len(self.axes[axis]): + axis_dtype = self.axes[axis].dtype + return self._constructor_sliced(dtype=axis_dtype) + + if numeric_only: + data = self._get_numeric_data() + else: + data = self + + res = data._reduce( + nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False + ) + indices = res._values + # indices will always be np.ndarray since axis is not N + + if (indices == -1).any(): + warnings.warn( + f"The behavior of {type(self).__name__}.idxmin with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + + index = data._get_axis(axis) + result = algorithms.take( + index._values, indices, allow_fill=True, fill_value=index._na_value + ) + final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis)) + return final_result.__finalize__(self, method="idxmin") + + @doc(_shared_docs["idxmax"], numeric_only_default="False") + def idxmax( + self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False + ) -> Series: + axis = self._get_axis_number(axis) + + if self.empty and len(self.axes[axis]): + axis_dtype = self.axes[axis].dtype + return self._constructor_sliced(dtype=axis_dtype) + + if numeric_only: + data = self._get_numeric_data() + else: + data = self + + res = data._reduce( + nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False + ) + indices = res._values + # indices will always be 1d array since axis is not None + + if (indices == -1).any(): + warnings.warn( + f"The behavior of {type(self).__name__}.idxmax with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + + index = data._get_axis(axis) + result = algorithms.take( + index._values, indices, allow_fill=True, fill_value=index._na_value + ) + final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis)) + return final_result.__finalize__(self, method="idxmax") + + def _get_agg_axis(self, axis_num: int) -> Index: + """ + Let's be explicit about this. + """ + if axis_num == 0: + return self.columns + elif axis_num == 1: + return self.index + else: + raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})") + + def mode( + self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True + ) -> DataFrame: + """ + Get the mode(s) of each element along the selected axis. + + The mode of a set of values is the value that appears most often. + It can be multiple values. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to iterate over while searching for the mode: + + * 0 or 'index' : get mode of each column + * 1 or 'columns' : get mode of each row. + + numeric_only : bool, default False + If True, only apply to numeric columns. + dropna : bool, default True + Don't consider counts of NaN/NaT. + + Returns + ------- + DataFrame + The modes of each column or row. 
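+            When columns have different numbers of modes, the shorter columns
+            are padded with ``NaN`` so that every column spans the same number
+            of result rows (editor's clarification; see the ``wings`` example
+            below).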
+ + See Also + -------- + Series.mode : Return the highest frequency value in a Series. + Series.value_counts : Return the counts of values in a Series. + + Examples + -------- + >>> df = pd.DataFrame([('bird', 2, 2), + ... ('mammal', 4, np.nan), + ... ('arthropod', 8, 0), + ... ('bird', 2, np.nan)], + ... index=('falcon', 'horse', 'spider', 'ostrich'), + ... columns=('species', 'legs', 'wings')) + >>> df + species legs wings + falcon bird 2 2.0 + horse mammal 4 NaN + spider arthropod 8 0.0 + ostrich bird 2 NaN + + By default, missing values are not considered, and the mode of wings + are both 0 and 2. Because the resulting DataFrame has two rows, + the second row of ``species`` and ``legs`` contains ``NaN``. + + >>> df.mode() + species legs wings + 0 bird 2.0 0.0 + 1 NaN NaN 2.0 + + Setting ``dropna=False`` ``NaN`` values are considered and they can be + the mode (like for wings). + + >>> df.mode(dropna=False) + species legs wings + 0 bird 2 NaN + + Setting ``numeric_only=True``, only the mode of numeric columns is + computed, and columns of other types are ignored. + + >>> df.mode(numeric_only=True) + legs wings + 0 2.0 0.0 + 1 NaN 2.0 + + To compute the mode over columns and not rows, use the axis parameter: + + >>> df.mode(axis='columns', numeric_only=True) + 0 1 + falcon 2.0 NaN + horse 4.0 NaN + spider 0.0 8.0 + ostrich 2.0 NaN + """ + data = self if not numeric_only else self._get_numeric_data() + + def f(s): + return s.mode(dropna=dropna) + + data = data.apply(f, axis=axis) + # Ensure index is type stable (should always use int index) + if data.empty: + data.index = default_index(0) + + return data + + @overload + def quantile( + self, + q: float = ..., + axis: Axis = ..., + numeric_only: bool = ..., + interpolation: QuantileInterpolation = ..., + method: Literal["single", "table"] = ..., + ) -> Series: + ... + + @overload + def quantile( + self, + q: AnyArrayLike | Sequence[float], + axis: Axis = ..., + numeric_only: bool = ..., + interpolation: QuantileInterpolation = ..., + method: Literal["single", "table"] = ..., + ) -> Series | DataFrame: + ... + + @overload + def quantile( + self, + q: float | AnyArrayLike | Sequence[float] = ..., + axis: Axis = ..., + numeric_only: bool = ..., + interpolation: QuantileInterpolation = ..., + method: Literal["single", "table"] = ..., + ) -> Series | DataFrame: + ... + + def quantile( + self, + q: float | AnyArrayLike | Sequence[float] = 0.5, + axis: Axis = 0, + numeric_only: bool = False, + interpolation: QuantileInterpolation = "linear", + method: Literal["single", "table"] = "single", + ) -> Series | DataFrame: + """ + Return values at the given quantile over requested axis. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + Value between 0 <= q <= 1, the quantile(s) to compute. + axis : {0 or 'index', 1 or 'columns'}, default 0 + Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionchanged:: 2.0.0 + The default value of ``numeric_only`` is now ``False``. + + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. 
+ method : {'single', 'table'}, default 'single' + Whether to compute quantiles per-column ('single') or over all columns + ('table'). When 'table', the only allowed interpolation methods are + 'nearest', 'lower', and 'higher'. + + Returns + ------- + Series or DataFrame + + If ``q`` is an array, a DataFrame will be returned where the + index is ``q``, the columns are the columns of self, and the + values are the quantiles. + If ``q`` is a float, a Series will be returned where the + index is the columns of self and the values are the quantiles. + + See Also + -------- + core.window.rolling.Rolling.quantile: Rolling quantile. + numpy.percentile: Numpy function to compute the percentile. + + Examples + -------- + >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), + ... columns=['a', 'b']) + >>> df.quantile(.1) + a 1.3 + b 3.7 + Name: 0.1, dtype: float64 + >>> df.quantile([.1, .5]) + a b + 0.1 1.3 3.7 + 0.5 2.5 55.0 + + Specifying `method='table'` will compute the quantile over all columns. + + >>> df.quantile(.1, method="table", interpolation="nearest") + a 1 + b 1 + Name: 0.1, dtype: int64 + >>> df.quantile([.1, .5], method="table", interpolation="nearest") + a b + 0.1 1 1 + 0.5 3 100 + + Specifying `numeric_only=False` will also compute the quantile of + datetime and timedelta data. + + >>> df = pd.DataFrame({'A': [1, 2], + ... 'B': [pd.Timestamp('2010'), + ... pd.Timestamp('2011')], + ... 'C': [pd.Timedelta('1 days'), + ... pd.Timedelta('2 days')]}) + >>> df.quantile(0.5, numeric_only=False) + A 1.5 + B 2010-07-02 12:00:00 + C 1 days 12:00:00 + Name: 0.5, dtype: object + """ + validate_percentile(q) + axis = self._get_axis_number(axis) + + if not is_list_like(q): + # BlockManager.quantile expects listlike, so we wrap and unwrap here + # error: List item 0 has incompatible type "float | ExtensionArray | + # ndarray[Any, Any] | Index | Series | Sequence[float]"; expected "float" + res_df = self.quantile( + [q], # type: ignore[list-item] + axis=axis, + numeric_only=numeric_only, + interpolation=interpolation, + method=method, + ) + if method == "single": + res = res_df.iloc[0] + else: + # cannot directly iloc over sparse arrays + res = res_df.T.iloc[:, 0] + if axis == 1 and len(self) == 0: + # GH#41544 try to get an appropriate dtype + dtype = find_common_type(list(self.dtypes)) + if needs_i8_conversion(dtype): + return res.astype(dtype) + return res + + q = Index(q, dtype=np.float64) + data = self._get_numeric_data() if numeric_only else self + + if axis == 1: + data = data.T + + if len(data.columns) == 0: + # GH#23925 _get_numeric_data may have dropped all columns + cols = Index([], name=self.columns.name) + + dtype = np.float64 + if axis == 1: + # GH#41544 try to get an appropriate dtype + cdtype = find_common_type(list(self.dtypes)) + if needs_i8_conversion(cdtype): + dtype = cdtype + + res = self._constructor([], index=q, columns=cols, dtype=dtype) + return res.__finalize__(self, method="quantile") + + valid_method = {"single", "table"} + if method not in valid_method: + raise ValueError( + f"Invalid method: {method}. Method must be in {valid_method}." + ) + if method == "single": + res = data._mgr.quantile(qs=q, interpolation=interpolation) + elif method == "table": + valid_interpolation = {"nearest", "lower", "higher"} + if interpolation not in valid_interpolation: + raise ValueError( + f"Invalid interpolation: {interpolation}. 
" + f"Interpolation must be in {valid_interpolation}" + ) + # handle degenerate case + if len(data) == 0: + if data.ndim == 2: + dtype = find_common_type(list(self.dtypes)) + else: + dtype = self.dtype + return self._constructor([], index=q, columns=data.columns, dtype=dtype) + + q_idx = np.quantile(np.arange(len(data)), q, method=interpolation) + + by = data.columns + if len(by) > 1: + keys = [data._get_label_or_level_values(x) for x in by] + indexer = lexsort_indexer(keys) + else: + k = data._get_label_or_level_values(by[0]) + indexer = nargsort(k) + + res = data._mgr.take(indexer[q_idx], verify=False) + res.axes[1] = q + + result = self._constructor_from_mgr(res, axes=res.axes) + return result.__finalize__(self, method="quantile") + + def to_timestamp( + self, + freq: Frequency | None = None, + how: ToTimestampHow = "start", + axis: Axis = 0, + copy: bool | None = None, + ) -> DataFrame: + """ + Cast to DatetimeIndex of timestamps, at *beginning* of period. + + Parameters + ---------- + freq : str, default frequency of PeriodIndex + Desired frequency. + how : {'s', 'e', 'start', 'end'} + Convention for converting period to timestamp; start of period + vs. end. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to convert (the index by default). + copy : bool, default True + If False then underlying input data is not copied. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + `__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + + Returns + ------- + DataFrame + The DataFrame has a DatetimeIndex. + + Examples + -------- + >>> idx = pd.PeriodIndex(['2023', '2024'], freq='Y') + >>> d = {'col1': [1, 2], 'col2': [3, 4]} + >>> df1 = pd.DataFrame(data=d, index=idx) + >>> df1 + col1 col2 + 2023 1 3 + 2024 2 4 + + The resulting timestamps will be at the beginning of the year in this case + + >>> df1 = df1.to_timestamp() + >>> df1 + col1 col2 + 2023-01-01 1 3 + 2024-01-01 2 4 + >>> df1.index + DatetimeIndex(['2023-01-01', '2024-01-01'], dtype='datetime64[ns]', freq=None) + + Using `freq` which is the offset that the Timestamps will have + + >>> df2 = pd.DataFrame(data=d, index=idx) + >>> df2 = df2.to_timestamp(freq='M') + >>> df2 + col1 col2 + 2023-01-31 1 3 + 2024-01-31 2 4 + >>> df2.index + DatetimeIndex(['2023-01-31', '2024-01-31'], dtype='datetime64[ns]', freq=None) + """ + new_obj = self.copy(deep=copy and not using_copy_on_write()) + + axis_name = self._get_axis_name(axis) + old_ax = getattr(self, axis_name) + if not isinstance(old_ax, PeriodIndex): + raise TypeError(f"unsupported Type {type(old_ax).__name__}") + + new_ax = old_ax.to_timestamp(freq=freq, how=how) + + setattr(new_obj, axis_name, new_ax) + return new_obj + + def to_period( + self, freq: Frequency | None = None, axis: Axis = 0, copy: bool | None = None + ) -> DataFrame: + """ + Convert DataFrame from DatetimeIndex to PeriodIndex. + + Convert DataFrame from DatetimeIndex to PeriodIndex with desired + frequency (inferred from index if not passed). + + Parameters + ---------- + freq : str, default + Frequency of the PeriodIndex. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to convert (the index by default). 
+ copy : bool, default True + If False then underlying input data is not copied. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + `__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + + Returns + ------- + DataFrame + The DataFrame has a PeriodIndex. + + Examples + -------- + >>> idx = pd.to_datetime( + ... [ + ... "2001-03-31 00:00:00", + ... "2002-05-31 00:00:00", + ... "2003-08-31 00:00:00", + ... ] + ... ) + + >>> idx + DatetimeIndex(['2001-03-31', '2002-05-31', '2003-08-31'], + dtype='datetime64[ns]', freq=None) + + >>> idx.to_period("M") + PeriodIndex(['2001-03', '2002-05', '2003-08'], dtype='period[M]') + + For the yearly frequency + + >>> idx.to_period("Y") + PeriodIndex(['2001', '2002', '2003'], dtype='period[Y-DEC]') + """ + new_obj = self.copy(deep=copy and not using_copy_on_write()) + + axis_name = self._get_axis_name(axis) + old_ax = getattr(self, axis_name) + if not isinstance(old_ax, DatetimeIndex): + raise TypeError(f"unsupported Type {type(old_ax).__name__}") + + new_ax = old_ax.to_period(freq=freq) + + setattr(new_obj, axis_name, new_ax) + return new_obj + + def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame: + """ + Whether each element in the DataFrame is contained in values. + + Parameters + ---------- + values : iterable, Series, DataFrame or dict + The result will only be true at a location if all the + labels match. If `values` is a Series, that's the index. If + `values` is a dict, the keys must be the column names, + which must match. If `values` is a DataFrame, + then both the index and column labels must match. + + Returns + ------- + DataFrame + DataFrame of booleans showing whether each element in the DataFrame + is contained in values. + + See Also + -------- + DataFrame.eq: Equality test for DataFrame. + Series.isin: Equivalent method on Series. + Series.str.contains: Test if pattern or regex is contained within a + string of a Series or Index. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, + ... index=['falcon', 'dog']) + >>> df + num_legs num_wings + falcon 2 2 + dog 4 0 + + When ``values`` is a list check whether every value in the DataFrame + is present in the list (which animals have 0 or 2 legs or wings) + + >>> df.isin([0, 2]) + num_legs num_wings + falcon True True + dog False True + + To check if ``values`` is *not* in the DataFrame, use the ``~`` operator: + + >>> ~df.isin([0, 2]) + num_legs num_wings + falcon False False + dog True False + + When ``values`` is a dict, we can pass values to check for each + column separately: + + >>> df.isin({'num_wings': [0, 3]}) + num_legs num_wings + falcon False False + dog False True + + When ``values`` is a Series or DataFrame the index and column must + match. Note that 'falcon' does not match based on the number of legs + in other. + + >>> other = pd.DataFrame({'num_legs': [8, 3], 'num_wings': [0, 2]}, + ... 
index=['spider', 'falcon']) + >>> df.isin(other) + num_legs num_wings + falcon False True + dog False False + """ + if isinstance(values, dict): + from pandas.core.reshape.concat import concat + + values = collections.defaultdict(list, values) + result = concat( + ( + self.iloc[:, [i]].isin(values[col]) + for i, col in enumerate(self.columns) + ), + axis=1, + ) + elif isinstance(values, Series): + if not values.index.is_unique: + raise ValueError("cannot compute isin with a duplicate axis.") + result = self.eq(values.reindex_like(self), axis="index") + elif isinstance(values, DataFrame): + if not (values.columns.is_unique and values.index.is_unique): + raise ValueError("cannot compute isin with a duplicate axis.") + result = self.eq(values.reindex_like(self)) + else: + if not is_list_like(values): + raise TypeError( + "only list-like or dict-like objects are allowed " + "to be passed to DataFrame.isin(), " + f"you passed a '{type(values).__name__}'" + ) + + def isin_(x): + # error: Argument 2 to "isin" has incompatible type "Union[Series, + # DataFrame, Sequence[Any], Mapping[Any, Any]]"; expected + # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, + # Series], List[Any], range]" + result = algorithms.isin( + x.ravel(), + values, # type: ignore[arg-type] + ) + return result.reshape(x.shape) + + res_mgr = self._mgr.apply(isin_) + result = self._constructor_from_mgr( + res_mgr, + axes=res_mgr.axes, + ) + return result.__finalize__(self, method="isin") + + # ---------------------------------------------------------------------- + # Add index and columns + _AXIS_ORDERS: list[Literal["index", "columns"]] = ["index", "columns"] + _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = { + **NDFrame._AXIS_TO_AXIS_NUMBER, + 1: 1, + "columns": 1, + } + _AXIS_LEN = len(_AXIS_ORDERS) + _info_axis_number: Literal[1] = 1 + _info_axis_name: Literal["columns"] = "columns" + + index = properties.AxisProperty( + axis=1, + doc=""" + The index (row labels) of the DataFrame. + + The index of a DataFrame is a series of labels that identify each row. + The labels can be integers, strings, or any other hashable type. The index + is used for label-based access and alignment, and can be accessed or + modified using this attribute. + + Returns + ------- + pandas.Index + The index labels of the DataFrame. + + See Also + -------- + DataFrame.columns : The column labels of the DataFrame. + DataFrame.to_numpy : Convert the DataFrame to a NumPy array. + + Examples + -------- + >>> df = pd.DataFrame({'Name': ['Alice', 'Bob', 'Aritra'], + ... 'Age': [25, 30, 35], + ... 'Location': ['Seattle', 'New York', 'Kona']}, + ... index=([10, 20, 30])) + >>> df.index + Index([10, 20, 30], dtype='int64') + + In this example, we create a DataFrame with 3 rows and 3 columns, + including Name, Age, and Location information. We set the index labels to + be the integers 10, 20, and 30. We then access the `index` attribute of the + DataFrame, which returns an `Index` object containing the index labels. + + >>> df.index = [100, 200, 300] + >>> df + Name Age Location + 100 Alice 25 Seattle + 200 Bob 30 New York + 300 Aritra 35 Kona + + In this example, we modify the index labels of the DataFrame by assigning + a new list of labels to the `index` attribute. The DataFrame is then + updated with the new labels, and the output shows the modified DataFrame. + """, + ) + columns = properties.AxisProperty( + axis=0, + doc=dedent( + """ + The column labels of the DataFrame. 
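+
+    Assigning a list-like of matching length relabels the columns in place
+    (editor's illustrative sketch, not upstream documentation):
+
+    >>> df = pd.DataFrame({'A': [1], 'B': [2]})
+    >>> df.columns = ['x', 'y']
+    >>> df.columns
+    Index(['x', 'y'], dtype='object')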
+ + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> df + A B + 0 1 3 + 1 2 4 + >>> df.columns + Index(['A', 'B'], dtype='object') + """ + ), + ) + + # ---------------------------------------------------------------------- + # Add plotting methods to DataFrame + plot = CachedAccessor("plot", pandas.plotting.PlotAccessor) + hist = pandas.plotting.hist_frame + boxplot = pandas.plotting.boxplot_frame + sparse = CachedAccessor("sparse", SparseFrameAccessor) + + # ---------------------------------------------------------------------- + # Internal Interface Methods + + def _to_dict_of_blocks(self): + """ + Return a dict of dtype -> Constructor Types that + each is a homogeneous dtype. + + Internal ONLY - only works for BlockManager + """ + mgr = self._mgr + # convert to BlockManager if needed -> this way support ArrayManager as well + mgr = cast(BlockManager, mgr_to_mgr(mgr, "block")) + return { + k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self) + for k, v, in mgr.to_dict().items() + } + + @property + def values(self) -> np.ndarray: + """ + Return a Numpy representation of the DataFrame. + + .. warning:: + + We recommend using :meth:`DataFrame.to_numpy` instead. + + Only the values in the DataFrame will be returned, the axes labels + will be removed. + + Returns + ------- + numpy.ndarray + The values of the DataFrame. + + See Also + -------- + DataFrame.to_numpy : Recommended alternative to this method. + DataFrame.index : Retrieve the index labels. + DataFrame.columns : Retrieving the column names. + + Notes + ----- + The dtype will be a lower-common-denominator dtype (implicit + upcasting); that is to say if the dtypes (even of numeric types) + are mixed, the one that accommodates all will be chosen. Use this + with care if you are not dealing with the blocks. + + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. If dtypes are int32 and uint8, dtype will be upcast to + int32. By :func:`numpy.find_common_type` convention, mixing int64 + and uint64 will result in a float64 dtype. + + Examples + -------- + A DataFrame where all columns are the same type (e.g., int64) results + in an array of the same type. + + >>> df = pd.DataFrame({'age': [ 3, 29], + ... 'height': [94, 170], + ... 'weight': [31, 115]}) + >>> df + age height weight + 0 3 94 31 + 1 29 170 115 + >>> df.dtypes + age int64 + height int64 + weight int64 + dtype: object + >>> df.values + array([[ 3, 94, 31], + [ 29, 170, 115]]) + + A DataFrame with mixed type columns(e.g., str/object, int64, float32) + results in an ndarray of the broadest type that accommodates these + mixed types (e.g., object). + + >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), + ... ('lion', 80.5, 1), + ... ('monkey', np.nan, None)], + ... 
columns=('name', 'max_speed', 'rank')) + >>> df2.dtypes + name object + max_speed float64 + rank object + dtype: object + >>> df2.values + array([['parrot', 24.0, 'second'], + ['lion', 80.5, 1], + ['monkey', nan, None]], dtype=object) + """ + return self._mgr.as_array() + + +def _from_nested_dict(data) -> collections.defaultdict: + new_data: collections.defaultdict = collections.defaultdict(dict) + for index, s in data.items(): + for col, v in s.items(): + new_data[col][index] = v + return new_data + + +def _reindex_for_setitem( + value: DataFrame | Series, index: Index +) -> tuple[ArrayLike, BlockValuesRefs | None]: + # reindex if necessary + + if value.index.equals(index) or not len(index): + if using_copy_on_write() and isinstance(value, Series): + return value._values, value._references + return value._values.copy(), None + + # GH#4107 + try: + reindexed_value = value.reindex(index)._values + except ValueError as err: + # raised in MultiIndex.from_tuples, see test_insert_error_msmgs + if not value.index.is_unique: + # duplicate axis + raise err + + raise TypeError( + "incompatible index of inserted column with frame index" + ) from err + return reindexed_value, None diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/roperator.py b/videollama2/lib/python3.10/site-packages/pandas/core/roperator.py new file mode 100644 index 0000000000000000000000000000000000000000..2f320f4e9c6b984b64e0fc1268e50a8ad1a7e1fe --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/roperator.py @@ -0,0 +1,62 @@ +""" +Reversed Operations not available in the stdlib operator module. +Defining these instead of using lambdas allows us to reference them by name. +""" +from __future__ import annotations + +import operator + + +def radd(left, right): + return right + left + + +def rsub(left, right): + return right - left + + +def rmul(left, right): + return right * left + + +def rdiv(left, right): + return right / left + + +def rtruediv(left, right): + return right / left + + +def rfloordiv(left, right): + return right // left + + +def rmod(left, right): + # check if right is a string as % is the string + # formatting operation; this is a TypeError + # otherwise perform the op + if isinstance(right, str): + typ = type(left).__name__ + raise TypeError(f"{typ} cannot perform the operation mod") + + return right % left + + +def rdivmod(left, right): + return divmod(right, left) + + +def rpow(left, right): + return right**left + + +def rand_(left, right): + return operator.and_(right, left) + + +def ror_(left, right): + return operator.or_(right, left) + + +def rxor(left, right): + return operator.xor(right, left) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/series.py b/videollama2/lib/python3.10/site-packages/pandas/core/series.py new file mode 100644 index 0000000000000000000000000000000000000000..6fd019656d207b12e01c9f5bc7260c8ff84bbdf5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/series.py @@ -0,0 +1,6631 @@ +""" +Data structure for 1-dimensional cross-sectional and time series data +""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, +) +import operator +import sys +from textwrap import dedent +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + overload, +) +import warnings +import weakref + +import numpy as np + +from pandas._config import ( + using_copy_on_write, + warn_copy_on_write, +) +from pandas._config.config import _get_option 
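+
+# Editor's note: ``_get_option`` is pandas' internal options accessor (used
+# below for ``"mode.data_manager"``); the public equivalents are
+# ``pd.get_option`` / ``pd.set_option``.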
+ +from pandas._libs import ( + lib, + properties, + reshape, +) +from pandas._libs.lib import is_range_indexer +from pandas.compat import PYPY +from pandas.compat._constants import REF_COUNT +from pandas.compat._optional import import_optional_dependency +from pandas.compat.numpy import function as nv +from pandas.errors import ( + ChainedAssignmentError, + InvalidIndexError, + _chained_assignment_method_msg, + _chained_assignment_msg, + _chained_assignment_warning_method_msg, + _chained_assignment_warning_msg, + _check_cacher, +) +from pandas.util._decorators import ( + Appender, + Substitution, + deprecate_nonkeyword_arguments, + doc, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import ( + validate_ascending, + validate_bool_kwarg, + validate_percentile, +) + +from pandas.core.dtypes.astype import astype_is_view +from pandas.core.dtypes.cast import ( + LossySetitemError, + construct_1d_arraylike_from_scalar, + find_common_type, + infer_dtype_from, + maybe_box_native, + maybe_cast_pointwise_result, +) +from pandas.core.dtypes.common import ( + is_dict_like, + is_integer, + is_iterator, + is_list_like, + is_object_dtype, + is_scalar, + pandas_dtype, + validate_all_hashable, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, + SparseDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.inference import is_hashable +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, + notna, + remove_na_arraylike, +) + +from pandas.core import ( + algorithms, + base, + common as com, + missing, + nanops, + ops, + roperator, +) +from pandas.core.accessor import CachedAccessor +from pandas.core.apply import SeriesApply +from pandas.core.arrays import ExtensionArray +from pandas.core.arrays.arrow import ( + ListAccessor, + StructAccessor, +) +from pandas.core.arrays.categorical import CategoricalAccessor +from pandas.core.arrays.sparse import SparseAccessor +from pandas.core.arrays.string_ import StringDtype +from pandas.core.construction import ( + array as pd_array, + extract_array, + sanitize_array, +) +from pandas.core.generic import ( + NDFrame, + make_doc, +) +from pandas.core.indexers import ( + disallow_ndim_indexing, + unpack_1tuple, +) +from pandas.core.indexes.accessors import CombinedDatetimelikeProperties +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + MultiIndex, + PeriodIndex, + default_index, + ensure_index, +) +import pandas.core.indexes.base as ibase +from pandas.core.indexes.multi import maybe_droplevels +from pandas.core.indexing import ( + check_bool_indexer, + check_dict_or_set_indexers, +) +from pandas.core.internals import ( + SingleArrayManager, + SingleBlockManager, +) +from pandas.core.methods import selectn +from pandas.core.shared_docs import _shared_docs +from pandas.core.sorting import ( + ensure_key_mapped, + nargsort, +) +from pandas.core.strings.accessor import StringMethods +from pandas.core.tools.datetimes import to_datetime + +import pandas.io.formats.format as fmt +from pandas.io.formats.info import ( + INFO_DOCSTRING, + SeriesInfo, + series_sub_kwargs, +) +import pandas.plotting + +if TYPE_CHECKING: + from pandas._libs.internals import BlockValuesRefs + from pandas._typing import ( + AggFuncType, + AnyAll, + AnyArrayLike, + ArrayLike, + Axis, + AxisInt, + CorrelationMethod, + DropKeep, + Dtype, + DtypeObj, + FilePath, + Frequency, + IgnoreRaise, + IndexKeyFunc, + IndexLabel, + Level, + MutableMappingT, + 
NaPosition, + NumpySorter, + NumpyValueArrayLike, + QuantileInterpolation, + ReindexMethod, + Renamer, + Scalar, + Self, + SingleManager, + SortKind, + StorageOptions, + Suffixes, + ValueKeyFunc, + WriteBuffer, + npt, + ) + + from pandas.core.frame import DataFrame + from pandas.core.groupby.generic import SeriesGroupBy + +__all__ = ["Series"] + +_shared_doc_kwargs = { + "axes": "index", + "klass": "Series", + "axes_single_arg": "{0 or 'index'}", + "axis": """axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame.""", + "inplace": """inplace : bool, default False + If True, performs operation inplace and returns None.""", + "unique": "np.ndarray", + "duplicated": "Series", + "optional_by": "", + "optional_reindex": """ +index : array-like, optional + New labels for the index. Preferably an Index object to avoid + duplicating data. +axis : int or str, optional + Unused.""", +} + + +def _coerce_method(converter): + """ + Install the scalar coercion methods. + """ + + def wrapper(self): + if len(self) == 1: + warnings.warn( + f"Calling {converter.__name__} on a single element Series is " + "deprecated and will raise a TypeError in the future. " + f"Use {converter.__name__}(ser.iloc[0]) instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return converter(self.iloc[0]) + raise TypeError(f"cannot convert the series to {converter}") + + wrapper.__name__ = f"__{converter.__name__}__" + return wrapper + + +# ---------------------------------------------------------------------- +# Series class + + +# error: Cannot override final attribute "ndim" (previously declared in base +# class "NDFrame") +# error: Cannot override final attribute "size" (previously declared in base +# class "NDFrame") +# definition in base class "NDFrame" +class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] + """ + One-dimensional ndarray with axis labels (including time series). + + Labels need not be unique but must be a hashable type. The object + supports both integer- and label-based indexing and provides a host of + methods for performing operations involving the index. Statistical + methods from ndarray have been overridden to automatically exclude + missing data (currently represented as NaN). + + Operations between Series (+, -, /, \\*, \\*\\*) align values based on their + associated index values-- they need not be the same length. The result + index will be the sorted union of the two indexes. + + Parameters + ---------- + data : array-like, Iterable, dict, or scalar value + Contains data stored in Series. If data is a dict, argument order is + maintained. + index : array-like or Index (1d) + Values must be hashable and have the same length as `data`. + Non-unique index values are allowed. Will default to + RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like + and index is None, then the keys in the data are used as the index. If the + index is not None, the resulting Series is reindexed with the index values. + dtype : str, numpy.dtype, or ExtensionDtype, optional + Data type for the output Series. If not specified, this will be + inferred from `data`. + See the :ref:`user guide ` for more usages. + name : Hashable, default None + The name to give to the Series. + copy : bool, default False + Copy input data. Only affects Series or 1d ndarray input. See examples. + + Notes + ----- + Please reference the :ref:`User Guide ` for more information. 
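+
+    A minimal sketch of the alignment described above (editor's illustration,
+    not upstream documentation): the result index is the sorted union of the
+    operands' indexes, and unmatched labels produce ``NaN``:
+
+    >>> s1 = pd.Series([1, 2], index=['a', 'b'])
+    >>> s2 = pd.Series([10, 20], index=['b', 'c'])
+    >>> s1 + s2
+    a     NaN
+    b    12.0
+    c     NaN
+    dtype: float64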
+ + Examples + -------- + Constructing Series from a dictionary with an Index specified + + >>> d = {'a': 1, 'b': 2, 'c': 3} + >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) + >>> ser + a 1 + b 2 + c 3 + dtype: int64 + + The keys of the dictionary match with the Index values, hence the Index + values have no effect. + + >>> d = {'a': 1, 'b': 2, 'c': 3} + >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) + >>> ser + x NaN + y NaN + z NaN + dtype: float64 + + Note that the Index is first built with the keys from the dictionary. + After this the Series is reindexed with the given Index values, hence we + get all NaN as a result. + + Constructing Series from a list with `copy=False`. + + >>> r = [1, 2] + >>> ser = pd.Series(r, copy=False) + >>> ser.iloc[0] = 999 + >>> r + [1, 2] + >>> ser + 0 999 + 1 2 + dtype: int64 + + Because the input is a Python list, the Series holds a `copy` of + the original data even though `copy=False`, so + the data is unchanged. + + Constructing Series from a 1d ndarray with `copy=False`. + + >>> r = np.array([1, 2]) + >>> ser = pd.Series(r, copy=False) + >>> ser.iloc[0] = 999 + >>> r + array([999, 2]) + >>> ser + 0 999 + 1 2 + dtype: int64 + + Because the input is a 1d ndarray, the Series holds a `view` on + the original data, so + the data is changed as well. + """ + + _typ = "series" + _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) + + _name: Hashable + _metadata: list[str] = ["_name"] + _internal_names_set = {"index", "name"} | NDFrame._internal_names_set + _accessors = {"dt", "cat", "str", "sparse"} + _hidden_attrs = ( + base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) + ) + + # similar to __array_priority__, positions Series after DataFrame + # but before Index and ExtensionArray. Should NOT be overridden by subclasses. + __pandas_priority__ = 3000 + + # Override cache_readonly bc Series is mutable + # error: Incompatible types in assignment (expression has type "property", + # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") + hasnans = property( # type: ignore[assignment] + # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" + base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] + doc=base.IndexOpsMixin.hasnans.__doc__, + ) + _mgr: SingleManager + + # ---------------------------------------------------------------------- + # Constructors + + def __init__( + self, + data=None, + index=None, + dtype: Dtype | None = None, + name=None, + copy: bool | None = None, + fastpath: bool | lib.NoDefault = lib.no_default, + ) -> None: + if fastpath is not lib.no_default: + warnings.warn( + "The 'fastpath' keyword in pd.Series is deprecated and will " + "be removed in a future version.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + else: + fastpath = False + + allow_mgr = False + if ( + isinstance(data, (SingleBlockManager, SingleArrayManager)) + and index is None + and dtype is None + and (copy is False or copy is None) + ): + if not allow_mgr: + # GH#52419 + warnings.warn( + f"Passing a {type(data).__name__} to {type(self).__name__} " + "is deprecated and will raise in a future version. " + "Use public APIs instead.", + DeprecationWarning, + stacklevel=2, + ) + if using_copy_on_write(): + data = data.copy(deep=False) + # GH#33357 called with just the SingleBlockManager + NDFrame.__init__(self, data) + if fastpath: + # e.g.
from _box_col_values, skip validation of name + object.__setattr__(self, "_name", name) + else: + self.name = name + return + + is_pandas_object = isinstance(data, (Series, Index, ExtensionArray)) + data_dtype = getattr(data, "dtype", None) + original_dtype = dtype + + if isinstance(data, (ExtensionArray, np.ndarray)): + if copy is not False and using_copy_on_write(): + if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): + data = data.copy() + if copy is None: + copy = False + + # we are called internally, so short-circuit + if fastpath: + # data is a ndarray, index is defined + if not isinstance(data, (SingleBlockManager, SingleArrayManager)): + manager = _get_option("mode.data_manager", silent=True) + if manager == "block": + data = SingleBlockManager.from_array(data, index) + elif manager == "array": + data = SingleArrayManager.from_array(data, index) + allow_mgr = True + elif using_copy_on_write() and not copy: + data = data.copy(deep=False) + + if not allow_mgr: + warnings.warn( + f"Passing a {type(data).__name__} to {type(self).__name__} " + "is deprecated and will raise in a future version. " + "Use public APIs instead.", + DeprecationWarning, + stacklevel=2, + ) + + if copy: + data = data.copy() + # skips validation of the name + object.__setattr__(self, "_name", name) + NDFrame.__init__(self, data) + return + + if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: + data = data.copy(deep=False) + + if not allow_mgr: + warnings.warn( + f"Passing a {type(data).__name__} to {type(self).__name__} " + "is deprecated and will raise in a future version. " + "Use public APIs instead.", + DeprecationWarning, + stacklevel=2, + ) + + name = ibase.maybe_extract_name(name, data, type(self)) + + if index is not None: + index = ensure_index(index) + + if dtype is not None: + dtype = self._validate_dtype(dtype) + + if data is None: + index = index if index is not None else default_index(0) + if len(index) or dtype is not None: + data = na_value_for_dtype(pandas_dtype(dtype), compat=False) + else: + data = [] + + if isinstance(data, MultiIndex): + raise NotImplementedError( + "initializing a Series from a MultiIndex is not supported" + ) + + refs = None + if isinstance(data, Index): + if dtype is not None: + data = data.astype(dtype, copy=False) + + if using_copy_on_write(): + refs = data._references + data = data._values + else: + # GH#24096 we need to ensure the index remains immutable + data = data._values.copy() + copy = False + + elif isinstance(data, np.ndarray): + if len(data.dtype): + # GH#13296 we are dealing with a compound dtype, which + # should be treated as 2D + raise ValueError( + "Cannot construct a Series from an ndarray with " + "compound dtype. Use DataFrame instead." + ) + elif isinstance(data, Series): + if index is None: + index = data.index + data = data._mgr.copy(deep=False) + else: + data = data.reindex(index, copy=copy) + copy = False + data = data._mgr + elif isinstance(data, Mapping): + data, index = self._init_dict(data, index, dtype) + dtype = None + copy = False + elif isinstance(data, (SingleBlockManager, SingleArrayManager)): + if index is None: + index = data.index + elif not data.index.equals(index) or copy: + # GH#19275 SingleBlockManager input should only be called + # internally + raise AssertionError( + "Cannot pass both SingleBlockManager " + "`data` argument and a different " + "`index` argument. `copy` must be False." 
+ ) + + if not allow_mgr: + warnings.warn( + f"Passing a {type(data).__name__} to {type(self).__name__} " + "is deprecated and will raise in a future version. " + "Use public APIs instead.", + DeprecationWarning, + stacklevel=2, + ) + allow_mgr = True + + elif isinstance(data, ExtensionArray): + pass + else: + data = com.maybe_iterable_to_list(data) + if is_list_like(data) and not len(data) and dtype is None: + # GH 29405: Pre-2.0, this defaulted to float. + dtype = np.dtype(object) + + if index is None: + if not is_list_like(data): + data = [data] + index = default_index(len(data)) + elif is_list_like(data): + com.require_length_match(data, index) + + # create/copy the manager + if isinstance(data, (SingleBlockManager, SingleArrayManager)): + if dtype is not None: + data = data.astype(dtype=dtype, errors="ignore", copy=copy) + elif copy: + data = data.copy() + else: + data = sanitize_array(data, index, dtype, copy) + + manager = _get_option("mode.data_manager", silent=True) + if manager == "block": + data = SingleBlockManager.from_array(data, index, refs=refs) + elif manager == "array": + data = SingleArrayManager.from_array(data, index) + + NDFrame.__init__(self, data) + self.name = name + self._set_axis(0, index) + + if original_dtype is None and is_pandas_object and data_dtype == np.object_: + if self.dtype != data_dtype: + warnings.warn( + "Dtype inference on a pandas object " + "(Series, Index, ExtensionArray) is deprecated. The Series " + "constructor will keep the original dtype in the future. " + "Call `infer_objects` on the result to get the old behavior.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + def _init_dict( + self, data: Mapping, index: Index | None = None, dtype: DtypeObj | None = None + ): + """ + Derive the "_mgr" and "index" attributes of a new Series from a + dictionary input. + + Parameters + ---------- + data : dict or dict-like + Data used to populate the new Series. + index : Index or None, default None + Index for the new Series: if None, use dict keys. + dtype : np.dtype, ExtensionDtype, or None, default None + The dtype for the new Series: if None, infer from data. + + Returns + ------- + _data : BlockManager for the new Series + index : index for the new Series + """ + keys: Index | tuple + + # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] + # raises KeyError), so we iterate the entire dict, and align + if data: + # GH#34717: keys and values were previously extracted with zip, but + # going through generators hurts performance, so extract them + # directly from the dict instead + + keys = tuple(data.keys()) + values = list(data.values()) # building a list of the values directly is faster + elif index is not None: + # fastpath for Series(data=None). Just use broadcasting a scalar + # instead of reindexing.
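+ # Editor's note, a minimal sketch rather than part of the upstream diff + # (assumes `import pandas as pd`): this branch is why an empty mapping plus an + # explicit index broadcasts a single NA value instead of building and then + # reindexing an intermediate array: + # >>> pd.Series({}, index=["a", "b"]) + # a NaN + # b NaN + # dtype: float64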
+ if len(index) or dtype is not None: + values = na_value_for_dtype(pandas_dtype(dtype), compat=False) + else: + values = [] + keys = index + else: + keys, values = default_index(0), [] + + # Input is now list-like, so rely on "standard" construction: + s = Series(values, index=keys, dtype=dtype) + + # Now we just make sure the order is respected, if any + if data and index is not None: + s = s.reindex(index, copy=False) + return s._mgr, s.index + + # ---------------------------------------------------------------------- + + @property + def _constructor(self) -> Callable[..., Series]: + return Series + + def _constructor_from_mgr(self, mgr, axes): + ser = Series._from_mgr(mgr, axes=axes) + ser._name = None # caller is responsible for setting real name + + if type(self) is Series: + # This would also work `if self._constructor is Series`, but + # this check is slightly faster, benefiting the most-common case. + return ser + + # We assume that the subclass __init__ knows how to handle a + # pd.Series object. + return self._constructor(ser) + + @property + def _constructor_expanddim(self) -> Callable[..., DataFrame]: + """ + Used when a manipulation result has one higher dimension as the + original, such as Series.to_frame() + """ + from pandas.core.frame import DataFrame + + return DataFrame + + def _constructor_expanddim_from_mgr(self, mgr, axes): + from pandas.core.frame import DataFrame + + df = DataFrame._from_mgr(mgr, axes=mgr.axes) + + if type(self) is Series: + # This would also work `if self._constructor_expanddim is DataFrame`, + # but this check is slightly faster, benefiting the most-common case. + return df + + # We assume that the subclass __init__ knows how to handle a + # pd.DataFrame object. + return self._constructor_expanddim(df) + + # types + @property + def _can_hold_na(self) -> bool: + return self._mgr._can_hold_na + + # ndarray compatibility + @property + def dtype(self) -> DtypeObj: + """ + Return the dtype object of the underlying data. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.dtype + dtype('int64') + """ + return self._mgr.dtype + + @property + def dtypes(self) -> DtypeObj: + """ + Return the dtype object of the underlying data. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.dtypes + dtype('int64') + """ + # DataFrame compatibility + return self.dtype + + @property + def name(self) -> Hashable: + """ + Return the name of the Series. + + The name of a Series becomes its index or column name if it is used + to form a DataFrame. It is also used whenever displaying the Series + using the interpreter. + + Returns + ------- + label (hashable object) + The name of the Series, also the column name if part of a DataFrame. + + See Also + -------- + Series.rename : Sets the Series name when given a scalar input. + Index.name : Corresponding Index property. + + Examples + -------- + The Series name can be set initially when calling the constructor. + + >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') + >>> s + 0 1 + 1 2 + 2 3 + Name: Numbers, dtype: int64 + >>> s.name = "Integers" + >>> s + 0 1 + 1 2 + 2 3 + Name: Integers, dtype: int64 + + The name of a Series within a DataFrame is its column name. + + >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], + ... 
columns=["Odd Numbers", "Even Numbers"]) + >>> df + Odd Numbers Even Numbers + 0 1 2 + 1 3 4 + 2 5 6 + >>> df["Even Numbers"].name + 'Even Numbers' + """ + return self._name + + @name.setter + def name(self, value: Hashable) -> None: + validate_all_hashable(value, error_name=f"{type(self).__name__}.name") + object.__setattr__(self, "_name", value) + + @property + def values(self): + """ + Return Series as ndarray or ndarray-like depending on the dtype. + + .. warning:: + + We recommend using :attr:`Series.array` or + :meth:`Series.to_numpy`, depending on whether you need + a reference to the underlying data or a NumPy array. + + Returns + ------- + numpy.ndarray or ndarray-like + + See Also + -------- + Series.array : Reference to the underlying data. + Series.to_numpy : A NumPy array representing the underlying data. + + Examples + -------- + >>> pd.Series([1, 2, 3]).values + array([1, 2, 3]) + + >>> pd.Series(list('aabc')).values + array(['a', 'a', 'b', 'c'], dtype=object) + + >>> pd.Series(list('aabc')).astype('category').values + ['a', 'a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + Timezone aware datetime data is converted to UTC: + + >>> pd.Series(pd.date_range('20130101', periods=3, + ... tz='US/Eastern')).values + array(['2013-01-01T05:00:00.000000000', + '2013-01-02T05:00:00.000000000', + '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') + """ + return self._mgr.external_values() + + @property + def _values(self): + """ + Return the internal repr of this data (defined by Block.internal_values). + These are the values as stored in the Block (ndarray or ExtensionArray + depending on the Block class), with datetime64[ns] and timedelta64[ns] + wrapped in ExtensionArrays to match Index._values behavior. + + Differs from the public ``.values`` for certain data types, because of + historical backwards compatibility of the public attribute (e.g. period + returns object ndarray and datetimetz a datetime64[ns] ndarray for + ``.values`` while it returns an ExtensionArray for ``._values`` in those + cases). + + Differs from ``.array`` in that this still returns the numpy array if + the Block is backed by a numpy array (except for datetime64 and + timedelta64 dtypes), while ``.array`` always returns an + ExtensionArray. + + Overview: + + dtype | values | _values | array | + ----------- | ------------- | ------------- | --------------------- | + Numeric | ndarray | ndarray | NumpyExtensionArray | + Category | Categorical | Categorical | Categorical | + dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | + dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | + td64[ns] | ndarray[m8ns] | TimedeltaArray| TimedeltaArray | + Period | ndarray[obj] | PeriodArray | PeriodArray | + Nullable | EA | EA | EA | + + """ + return self._mgr.internal_values() + + @property + def _references(self) -> BlockValuesRefs | None: + if isinstance(self._mgr, SingleArrayManager): + return None + return self._mgr._block.refs + + # error: Decorated property not supported + @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc] + @property + def array(self) -> ExtensionArray: + return self._mgr.array_values() + + # ops + def ravel(self, order: str = "C") -> ArrayLike: + """ + Return the flattened underlying data as an ndarray or ExtensionArray. + + .. deprecated:: 2.2.0 + Series.ravel is deprecated. The underlying array is already 1D, so + ravel is not necessary. Use :meth:`to_numpy` for conversion to a numpy + array instead.
+ + Returns + ------- + numpy.ndarray or ExtensionArray + Flattened data of the Series. + + See Also + -------- + numpy.ndarray.ravel : Return a flattened array. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.ravel() # doctest: +SKIP + array([1, 2, 3]) + """ + warnings.warn( + "Series.ravel is deprecated. The underlying array is already 1D, so " + "ravel is not necessary. Use `to_numpy()` for conversion to a numpy " + "array instead.", + FutureWarning, + stacklevel=2, + ) + arr = self._values.ravel(order=order) + if isinstance(arr, np.ndarray) and using_copy_on_write(): + arr.flags.writeable = False + return arr + + def __len__(self) -> int: + """ + Return the length of the Series. + """ + return len(self._mgr) + + def view(self, dtype: Dtype | None = None) -> Series: + """ + Create a new view of the Series. + + .. deprecated:: 2.2.0 + ``Series.view`` is deprecated and will be removed in a future version. + Use :meth:`Series.astype` as an alternative to change the dtype. + + This function will return a new Series with a view of the same + underlying values in memory, optionally reinterpreted with a new data + type. The new data type must preserve the same size in bytes as to not + cause index misalignment. + + Parameters + ---------- + dtype : data type + Data type object or one of their string representations. + + Returns + ------- + Series + A new Series object as a view of the same data in memory. + + See Also + -------- + numpy.ndarray.view : Equivalent numpy function to create a new view of + the same data in memory. + + Notes + ----- + Series are instantiated with ``dtype=float64`` by default. While + ``numpy.ndarray.view()`` will return a view with the same data type as + the original array, ``Series.view()`` (without specified dtype) + will try using ``float64`` and may fail if the original data type size + in bytes is not the same. + + Examples + -------- + Use ``astype`` to change the dtype instead. + """ + warnings.warn( + "Series.view is deprecated and will be removed in a future version. " + "Use ``astype`` as an alternative to change the dtype.", + FutureWarning, + stacklevel=2, + ) + # self.array instead of self._values so we piggyback on NumpyExtensionArray + # implementation + res_values = self.array.view(dtype) + res_ser = self._constructor(res_values, index=self.index, copy=False) + if isinstance(res_ser._mgr, SingleBlockManager): + blk = res_ser._mgr._block + blk.refs = cast("BlockValuesRefs", self._references) + blk.refs.add_reference(blk) + return res_ser.__finalize__(self, method="view") + + # ---------------------------------------------------------------------- + # NDArray Compat + def __array__( + self, dtype: npt.DTypeLike | None = None, copy: bool | None = None + ) -> np.ndarray: + """ + Return the values as a NumPy array. + + Users should not call this directly. Rather, it is invoked by + :func:`numpy.array` and :func:`numpy.asarray`. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to use for the resulting NumPy array. By default, + the dtype is inferred from the data. + + copy : bool or None, optional + Unused. + + Returns + ------- + numpy.ndarray + The values in the series converted to a :class:`numpy.ndarray` + with the specified `dtype`. + + See Also + -------- + array : Create a new array from data. + Series.array : Zero-copy view to the array backing the Series. + Series.to_numpy : Series method for similar behavior. 
+ + Examples + -------- + >>> ser = pd.Series([1, 2, 3]) + >>> np.asarray(ser) + array([1, 2, 3]) + + For timezone-aware data, the timezones may be retained with + ``dtype='object'`` + + >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + >>> np.asarray(tzser, dtype="object") + array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), + Timestamp('2000-01-02 00:00:00+0100', tz='CET')], + dtype=object) + + Or the values may be localized to UTC and the tzinfo discarded with + ``dtype='datetime64[ns]'`` + + >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS + array(['1999-12-31T23:00:00.000000000', ...], + dtype='datetime64[ns]') + """ + values = self._values + arr = np.asarray(values, dtype=dtype) + if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): + arr = arr.view() + arr.flags.writeable = False + return arr + + # ---------------------------------------------------------------------- + + def __column_consortium_standard__(self, *, api_version: str | None = None) -> Any: + """ + Provide entry point to the Consortium DataFrame Standard API. + + This is developed and maintained outside of pandas. + Please report any issues to https://github.com/data-apis/dataframe-api-compat. + """ + dataframe_api_compat = import_optional_dependency("dataframe_api_compat") + return ( + dataframe_api_compat.pandas_standard.convert_to_standard_compliant_column( + self, api_version=api_version + ) + ) + + # ---------------------------------------------------------------------- + # Unary Methods + + # coercion + __float__ = _coerce_method(float) + __int__ = _coerce_method(int) + + # ---------------------------------------------------------------------- + + # indexers + @property + def axes(self) -> list[Index]: + """ + Return a list of the row axis labels. + """ + return [self.index] + + # ---------------------------------------------------------------------- + # Indexing Methods + + def _ixs(self, i: int, axis: AxisInt = 0) -> Any: + """ + Return the i-th value or values in the Series by location. + + Parameters + ---------- + i : int + + Returns + ------- + scalar + """ + return self._values[i] + + def _slice(self, slobj: slice, axis: AxisInt = 0) -> Series: + # axis kwarg is retained for compat with NDFrame method + # _slice is *always* positional + mgr = self._mgr.get_slice(slobj, axis=axis) + out = self._constructor_from_mgr(mgr, axes=mgr.axes) + out._name = self._name + return out.__finalize__(self) + + def __getitem__(self, key): + check_dict_or_set_indexers(key) + key = com.apply_if_callable(key, self) + + if key is Ellipsis: + if using_copy_on_write() or warn_copy_on_write(): + return self.copy(deep=False) + return self + + key_is_scalar = is_scalar(key) + if isinstance(key, (list, tuple)): + key = unpack_1tuple(key) + + if is_integer(key) and self.index._should_fallback_to_positional: + warnings.warn( + # GH#50617 + "Series.__getitem__ treating keys as positions is deprecated. " + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). 
To access " + "a value by position, use `ser.iloc[pos]`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._values[key] + + elif key_is_scalar: + return self._get_value(key) + + # Convert generator to list before going through hashable part + # (We will iterate through the generator there to check for slices) + if is_iterator(key): + key = list(key) + + if is_hashable(key) and not isinstance(key, slice): + # Otherwise index.get_value will raise InvalidIndexError + try: + # For labels that don't resolve as scalars like tuples and frozensets + result = self._get_value(key) + + return result + + except (KeyError, TypeError, InvalidIndexError): + # InvalidIndexError for e.g. generator + # see test_series_getitem_corner_generator + if isinstance(key, tuple) and isinstance(self.index, MultiIndex): + # We still have the corner case where a tuple is a key + # in the first level of our MultiIndex + return self._get_values_tuple(key) + + if isinstance(key, slice): + # Do slice check before somewhat-costly is_bool_indexer + return self._getitem_slice(key) + + if com.is_bool_indexer(key): + key = check_bool_indexer(self.index, key) + key = np.asarray(key, dtype=bool) + return self._get_rows_with_mask(key) + + return self._get_with(key) + + def _get_with(self, key): + # other: fancy integer or otherwise + if isinstance(key, ABCDataFrame): + raise TypeError( + "Indexing a Series with DataFrame is not " + "supported, use the appropriate DataFrame column" + ) + elif isinstance(key, tuple): + return self._get_values_tuple(key) + + elif not is_list_like(key): + # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 + return self.loc[key] + + if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): + key = list(key) + + key_type = lib.infer_dtype(key, skipna=False) + + # Note: The key_type == "boolean" case should be caught by the + # com.is_bool_indexer check in __getitem__ + if key_type == "integer": + # We need to decide whether to treat this as a positional indexer + # (i.e. self.iloc) or label-based (i.e. self.loc) + if not self.index._should_fallback_to_positional: + return self.loc[key] + else: + warnings.warn( + # GH#50617 + "Series.__getitem__ treating keys as positions is deprecated. " + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). To access " + "a value by position, use `ser.iloc[pos]`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.iloc[key] + + # handle the dup indexing case GH#4246 + return self.loc[key] + + def _get_values_tuple(self, key: tuple): + # mpl hackaround + if com.any_none(*key): + # mpl compat if we look up e.g. 
ser[:, np.newaxis]; + # see tests.series.timeseries.test_mpl_compat_hack + # the asarray is needed to avoid returning a 2D DatetimeArray + result = np.asarray(self._values[key]) + disallow_ndim_indexing(result) + return result + + if not isinstance(self.index, MultiIndex): + raise KeyError("key of type tuple not found and not a MultiIndex") + + # If key is contained, would have returned by now + indexer, new_index = self.index.get_loc_level(key) + new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) + if isinstance(indexer, slice): + new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] + return new_ser.__finalize__(self) + + def _get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Series: + new_mgr = self._mgr.get_rows_with_mask(indexer) + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) + + def _get_value(self, label, takeable: bool = False): + """ + Quickly retrieve single value at passed index label. + + Parameters + ---------- + label : object + takeable : interpret the index as indexers, default False + + Returns + ------- + scalar value + """ + if takeable: + return self._values[label] + + # Similar to Index.get_value, but we do not fall back to positional + loc = self.index.get_loc(label) + + if is_integer(loc): + return self._values[loc] + + if isinstance(self.index, MultiIndex): + mi = self.index + new_values = self._values[loc] + if len(new_values) == 1 and mi.nlevels == 1: + # If more than one level left, we can not return a scalar + return new_values[0] + + new_index = mi[loc] + new_index = maybe_droplevels(new_index, label) + new_ser = self._constructor( + new_values, index=new_index, name=self.name, copy=False + ) + if isinstance(loc, slice): + new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] + return new_ser.__finalize__(self) + + else: + return self.iloc[loc] + + def __setitem__(self, key, value) -> None: + warn = True + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= 3: + warnings.warn( + _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 + ) + elif not PYPY and not using_copy_on_write(): + ctr = sys.getrefcount(self) + ref_count = 3 + if not warn_copy_on_write() and _check_cacher(self): + # see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221 + ref_count += 1 + if ctr <= ref_count and ( + warn_copy_on_write() + or ( + not warn_copy_on_write() + and self._mgr.blocks[0].refs.has_reference() # type: ignore[union-attr] + ) + ): + warn = False + warnings.warn( + _chained_assignment_warning_msg, FutureWarning, stacklevel=2 + ) + + check_dict_or_set_indexers(key) + key = com.apply_if_callable(key, self) + cacher_needs_updating = self._check_is_chained_assignment_possible() + + if key is Ellipsis: + key = slice(None) + + if isinstance(key, slice): + indexer = self.index._convert_slice_indexer(key, kind="getitem") + return self._set_values(indexer, value, warn=warn) + + try: + self._set_with_engine(key, value, warn=warn) + except KeyError: + # We have a scalar (or for MultiIndex or object-dtype, scalar-like) + # key that is not present in self.index. + if is_integer(key): + if not self.index._should_fallback_to_positional: + # GH#33469 + self.loc[key] = value + else: + # positional setter + # can't use _mgr.setitem_inplace yet bc could have *both* + # KeyError and then ValueError, xref GH#45070 + warnings.warn( + # GH#50617 + "Series.__setitem__ treating keys as positions is deprecated. 
" + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). To set " + "a value by position, use `ser.iloc[pos] = value`", + FutureWarning, + stacklevel=find_stack_level(), + ) + self._set_values(key, value) + else: + # GH#12862 adding a new key to the Series + self.loc[key] = value + + except (TypeError, ValueError, LossySetitemError): + # The key was OK, but we cannot set the value losslessly + indexer = self.index.get_loc(key) + self._set_values(indexer, value) + + except InvalidIndexError as err: + if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): + # cases with MultiIndex don't get here bc they raise KeyError + # e.g. test_basic_getitem_setitem_corner + raise KeyError( + "key of type tuple not found and not a MultiIndex" + ) from err + + if com.is_bool_indexer(key): + key = check_bool_indexer(self.index, key) + key = np.asarray(key, dtype=bool) + + if ( + is_list_like(value) + and len(value) != len(self) + and not isinstance(value, Series) + and not is_object_dtype(self.dtype) + ): + # Series will be reindexed to have matching length inside + # _where call below + # GH#44265 + indexer = key.nonzero()[0] + self._set_values(indexer, value) + return + + # otherwise with listlike other we interpret series[mask] = other + # as series[mask] = other[mask] + try: + self._where(~key, value, inplace=True, warn=warn) + except InvalidIndexError: + # test_where_dups + self.iloc[key] = value + return + + else: + self._set_with(key, value, warn=warn) + + if cacher_needs_updating: + self._maybe_update_cacher(inplace=True) + + def _set_with_engine(self, key, value, warn: bool = True) -> None: + loc = self.index.get_loc(key) + + # this is equivalent to self._values[key] = value + self._mgr.setitem_inplace(loc, value, warn=warn) + + def _set_with(self, key, value, warn: bool = True) -> None: + # We got here via exception-handling off of InvalidIndexError, so + # key should always be listlike at this point. + assert not isinstance(key, tuple) + + if is_iterator(key): + # Without this, the call to infer_dtype will consume the generator + key = list(key) + + if not self.index._should_fallback_to_positional: + # Regardless of the key type, we're treating it as labels + self._set_labels(key, value, warn=warn) + + else: + # Note: key_type == "boolean" should not occur because that + # should be caught by the is_bool_indexer check in __setitem__ + key_type = lib.infer_dtype(key, skipna=False) + + if key_type == "integer": + warnings.warn( + # GH#50617 + "Series.__setitem__ treating keys as positions is deprecated. " + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). 
To set " + "a value by position, use `ser.iloc[pos] = value`", + FutureWarning, + stacklevel=find_stack_level(), + ) + self._set_values(key, value, warn=warn) + else: + self._set_labels(key, value, warn=warn) + + def _set_labels(self, key, value, warn: bool = True) -> None: + key = com.asarray_tuplesafe(key) + indexer: np.ndarray = self.index.get_indexer(key) + mask = indexer == -1 + if mask.any(): + raise KeyError(f"{key[mask]} not in index") + self._set_values(indexer, value, warn=warn) + + def _set_values(self, key, value, warn: bool = True) -> None: + if isinstance(key, (Index, Series)): + key = key._values + + self._mgr = self._mgr.setitem(indexer=key, value=value, warn=warn) + self._maybe_update_cacher() + + def _set_value(self, label, value, takeable: bool = False) -> None: + """ + Quickly set single value at passed label. + + If label is not contained, a new object is created with the label + placed at the end of the result index. + + Parameters + ---------- + label : object + Partial indexing with MultiIndex not allowed. + value : object + Scalar value. + takeable : interpret the index as indexers, default False + """ + if not takeable: + try: + loc = self.index.get_loc(label) + except KeyError: + # set using a non-recursive method + self.loc[label] = value + return + else: + loc = label + + self._set_values(loc, value) + + # ---------------------------------------------------------------------- + # Lookup Caching + + @property + def _is_cached(self) -> bool: + """Return boolean indicating if self is cached or not.""" + return getattr(self, "_cacher", None) is not None + + def _get_cacher(self): + """return my cacher or None""" + cacher = getattr(self, "_cacher", None) + if cacher is not None: + cacher = cacher[1]() + return cacher + + def _reset_cacher(self) -> None: + """ + Reset the cacher. + """ + if hasattr(self, "_cacher"): + del self._cacher + + def _set_as_cached(self, item, cacher) -> None: + """ + Set the _cacher attribute on the calling object with a weakref to + cacher. 
+ """ + if using_copy_on_write(): + return + self._cacher = (item, weakref.ref(cacher)) + + def _clear_item_cache(self) -> None: + # no-op for Series + pass + + def _check_is_chained_assignment_possible(self) -> bool: + """ + See NDFrame._check_is_chained_assignment_possible.__doc__ + """ + if self._is_view and self._is_cached: + ref = self._get_cacher() + if ref is not None and ref._is_mixed_type: + self._check_setitem_copy(t="referent", force=True) + return True + return super()._check_is_chained_assignment_possible() + + def _maybe_update_cacher( + self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False + ) -> None: + """ + See NDFrame._maybe_update_cacher.__doc__ + """ + # for CoW, we never want to update the parent DataFrame cache + # if the Series changed, but don't keep track of any cacher + if using_copy_on_write(): + return + cacher = getattr(self, "_cacher", None) + if cacher is not None: + ref: DataFrame = cacher[1]() + + # we are trying to reference a dead referent, hence + # a copy + if ref is None: + del self._cacher + elif len(self) == len(ref) and self.name in ref.columns: + # GH#42530 self.name must be in ref.columns + # to ensure column still in dataframe + # otherwise, either self or ref has swapped in new arrays + ref._maybe_cache_changed(cacher[0], self, inplace=inplace) + else: + # GH#33675 we have swapped in a new array, so parent + # reference to self is now invalid + ref._item_cache.pop(cacher[0], None) + + super()._maybe_update_cacher( + clear=clear, verify_is_copy=verify_is_copy, inplace=inplace + ) + + # ---------------------------------------------------------------------- + # Unsorted + + def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: + """ + Repeat elements of a Series. + + Returns a new Series where each element of the current Series + is repeated consecutively a given number of times. + + Parameters + ---------- + repeats : int or array of ints + The number of repetitions for each element. This should be a + non-negative integer. Repeating 0 times will return an empty + Series. + axis : None + Unused. Parameter needed for compatibility with DataFrame. + + Returns + ------- + Series + Newly created Series with repeated elements. + + See Also + -------- + Index.repeat : Equivalent function for Index. + numpy.repeat : Similar method for :class:`numpy.ndarray`. + + Examples + -------- + >>> s = pd.Series(['a', 'b', 'c']) + >>> s + 0 a + 1 b + 2 c + dtype: object + >>> s.repeat(2) + 0 a + 0 a + 1 b + 1 b + 2 c + 2 c + dtype: object + >>> s.repeat([1, 2, 3]) + 0 a + 1 b + 1 b + 2 c + 2 c + 2 c + dtype: object + """ + nv.validate_repeat((), {"axis": axis}) + new_index = self.index.repeat(repeats) + new_values = self._values.repeat(repeats) + return self._constructor(new_values, index=new_index, copy=False).__finalize__( + self, method="repeat" + ) + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: Literal[False] = ..., + name: Level = ..., + inplace: Literal[False] = ..., + allow_duplicates: bool = ..., + ) -> DataFrame: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: Literal[True], + name: Level = ..., + inplace: Literal[False] = ..., + allow_duplicates: bool = ..., + ) -> Series: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + name: Level = ..., + inplace: Literal[True], + allow_duplicates: bool = ..., + ) -> None: + ... 
+ + def reset_index( + self, + level: IndexLabel | None = None, + *, + drop: bool = False, + name: Level = lib.no_default, + inplace: bool = False, + allow_duplicates: bool = False, + ) -> DataFrame | Series | None: + """ + Generate a new DataFrame or Series with the index reset. + + This is useful when the index needs to be treated as a column, or + when the index is meaningless and needs to be reset to the default + before another operation. + + Parameters + ---------- + level : int, str, tuple, or list, optional + For a Series with a MultiIndex, only remove the specified levels + from the index. Removes all levels by default. + drop : bool, default False + Just reset the index, without inserting it as a column in + the new DataFrame. + name : object, optional + The name to use for the column containing the original Series + values. Uses ``self.name`` by default. This argument is ignored + when `drop` is True. + inplace : bool, default False + Modify the Series in place (do not create a new object). + allow_duplicates : bool, default False + Allow duplicate column labels to be created. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series or DataFrame or None + When `drop` is False (the default), a DataFrame is returned. + The newly created columns will come first in the DataFrame, + followed by the original Series values. + When `drop` is True, a `Series` is returned. + In either case, if ``inplace=True``, no value is returned. + + See Also + -------- + DataFrame.reset_index: Analogous function for DataFrame. + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4], name='foo', + ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) + + Generate a DataFrame with default index. + + >>> s.reset_index() + idx foo + 0 a 1 + 1 b 2 + 2 c 3 + 3 d 4 + + To specify the name of the new column use `name`. + + >>> s.reset_index(name='values') + idx values + 0 a 1 + 1 b 2 + 2 c 3 + 3 d 4 + + To generate a new Series with the default index, set `drop` to True. + + >>> s.reset_index(drop=True) + 0 1 + 1 2 + 2 3 + 3 4 + Name: foo, dtype: int64 + + The `level` parameter is interesting for Series with a multi-level + index. + + >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), + ... np.array(['one', 'two', 'one', 'two'])] + >>> s2 = pd.Series( + ... range(4), name='foo', + ... index=pd.MultiIndex.from_arrays(arrays, + ... names=['a', 'b'])) + + To remove a specific level from the Index, use `level`. + + >>> s2.reset_index(level='a') + a foo + b + one bar 0 + two bar 1 + one baz 2 + two baz 3 + + If `level` is not set, all levels are removed from the Index.
+ + >>> s2.reset_index() + a b foo + 0 bar one 0 + 1 bar two 1 + 2 baz one 2 + 3 baz two 3 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if drop: + new_index = default_index(len(self)) + if level is not None: + level_list: Sequence[Hashable] + if not isinstance(level, (tuple, list)): + level_list = [level] + else: + level_list = level + level_list = [self.index._get_level_number(lev) for lev in level_list] + if len(level_list) < self.index.nlevels: + new_index = self.index.droplevel(level_list) + + if inplace: + self.index = new_index + elif using_copy_on_write(): + new_ser = self.copy(deep=False) + new_ser.index = new_index + return new_ser.__finalize__(self, method="reset_index") + else: + return self._constructor( + self._values.copy(), index=new_index, copy=False, dtype=self.dtype + ).__finalize__(self, method="reset_index") + elif inplace: + raise TypeError( + "Cannot reset_index inplace on a Series to create a DataFrame" + ) + else: + if name is lib.no_default: + # For backwards compatibility, keep columns as [0] instead of + # [None] when self.name is None + if self.name is None: + name = 0 + else: + name = self.name + + df = self.to_frame(name) + return df.reset_index( + level=level, drop=drop, allow_duplicates=allow_duplicates + ) + return None + + # ---------------------------------------------------------------------- + # Rendering Methods + + def __repr__(self) -> str: + """ + Return a string representation for a particular Series. + """ + # pylint: disable=invalid-repr-returned + repr_params = fmt.get_series_repr_params() + return self.to_string(**repr_params) + + @overload + def to_string( + self, + buf: None = ..., + na_rep: str = ..., + float_format: str | None = ..., + header: bool = ..., + index: bool = ..., + length: bool = ..., + dtype=..., + name=..., + max_rows: int | None = ..., + min_rows: int | None = ..., + ) -> str: + ... + + @overload + def to_string( + self, + buf: FilePath | WriteBuffer[str], + na_rep: str = ..., + float_format: str | None = ..., + header: bool = ..., + index: bool = ..., + length: bool = ..., + dtype=..., + name=..., + max_rows: int | None = ..., + min_rows: int | None = ..., + ) -> None: + ... + + def to_string( + self, + buf: FilePath | WriteBuffer[str] | None = None, + na_rep: str = "NaN", + float_format: str | None = None, + header: bool = True, + index: bool = True, + length: bool = False, + dtype: bool = False, + name: bool = False, + max_rows: int | None = None, + min_rows: int | None = None, + ) -> str | None: + """ + Render a string representation of the Series. + + Parameters + ---------- + buf : StringIO-like, optional + Buffer to write to. + na_rep : str, optional + String representation of NaN to use, default 'NaN'. + float_format : one-parameter function, optional + Formatter function to apply to columns' elements if they are + floats, default None. + header : bool, default True + Add the Series header (index name). + index : bool, optional + Add index (row) labels, default True. + length : bool, default False + Add the Series length. + dtype : bool, default False + Add the Series dtype. + name : bool, default False + Add the Series name if not None. + max_rows : int, optional + Maximum number of rows to show before truncating. If None, show + all. + min_rows : int, optional + The number of rows to display in a truncated repr (when number + of rows is above `max_rows`). + + Returns + ------- + str or None + String representation of Series if ``buf=None``, otherwise None. 
+ + Examples + -------- + >>> ser = pd.Series([1, 2, 3]).to_string() + >>> ser + '0 1\\n1 2\\n2 3' + """ + formatter = fmt.SeriesFormatter( + self, + name=name, + length=length, + header=header, + index=index, + dtype=dtype, + na_rep=na_rep, + float_format=float_format, + min_rows=min_rows, + max_rows=max_rows, + ) + result = formatter.to_string() + + # catch contract violations + if not isinstance(result, str): + raise AssertionError( + "result must be of type str, type " + f"of result is {repr(type(result).__name__)}" + ) + + if buf is None: + return result + else: + if hasattr(buf, "write"): + buf.write(result) + else: + with open(buf, "w", encoding="utf-8") as f: + f.write(result) + return None + + @doc( + klass=_shared_doc_kwargs["klass"], + storage_options=_shared_docs["storage_options"], + examples=dedent( + """Examples + -------- + >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") + >>> print(s.to_markdown()) + | | animal | + |---:|:---------| + | 0 | elk | + | 1 | pig | + | 2 | dog | + | 3 | quetzal | + + Output markdown with a tabulate option. + + >>> print(s.to_markdown(tablefmt="grid")) + +----+----------+ + | | animal | + +====+==========+ + | 0 | elk | + +----+----------+ + | 1 | pig | + +----+----------+ + | 2 | dog | + +----+----------+ + | 3 | quetzal | + +----+----------+""" + ), + ) + def to_markdown( + self, + buf: IO[str] | None = None, + mode: str = "wt", + index: bool = True, + storage_options: StorageOptions | None = None, + **kwargs, + ) -> str | None: + """ + Print {klass} in Markdown-friendly format. + + Parameters + ---------- + buf : str, Path or StringIO-like, optional, default None + Buffer to write to. If None, the output is returned as a string. + mode : str, optional + Mode in which file is opened, "wt" by default. + index : bool, optional, default True + Add index (row) labels. + + {storage_options} + + **kwargs + These parameters will be passed to `tabulate \ + <https://pypi.org/project/tabulate>`_. + + Returns + ------- + str + {klass} in Markdown-friendly format. + + Notes + ----- + Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. + + {examples} + """ + return self.to_frame().to_markdown( + buf, mode=mode, index=index, storage_options=storage_options, **kwargs + ) + + # ---------------------------------------------------------------------- + + def items(self) -> Iterable[tuple[Hashable, Any]]: + """ + Lazily iterate over (index, value) tuples. + + This method returns an iterable tuple (index, value). This is + convenient if you want to create a lazy iterator. + + Returns + ------- + iterable + Iterable of tuples containing the (index, value) pairs from a + Series. + + See Also + -------- + DataFrame.items : Iterate over (column name, Series) pairs. + DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. + + Examples + -------- + >>> s = pd.Series(['A', 'B', 'C']) + >>> for index, value in s.items(): + ... print(f"Index : {index}, Value : {value}") + Index : 0, Value : A + Index : 1, Value : B + Index : 2, Value : C + """ + return zip(iter(self.index), iter(self)) + + # ---------------------------------------------------------------------- + # Misc public methods + + def keys(self) -> Index: + """ + Return alias for index. + + Returns + ------- + Index + Index of the Series. + + Examples + -------- + >>> s = pd.Series([1, 2, 3], index=[0, 1, 2]) + >>> s.keys() + Index([0, 1, 2], dtype='int64') + """ + return self.index + + @overload + def to_dict( + self, *, into: type[MutableMappingT] | MutableMappingT + ) -> MutableMappingT: + ...
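+ # Editor's note, a minimal sketch rather than part of the upstream diff + # (assumes `import io` and `import pandas as pd`): per the buffer handling in + # to_string above, a str comes back only when buf is None; otherwise the text + # is written to the buffer or path and None is returned: + # >>> s = pd.Series([1, 2, 3]) + # >>> text = s.to_string() + # >>> buf = io.StringIO() + # >>> s.to_string(buf=buf) is None + # True + # >>> buf.getvalue() == text + # True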
+ + @overload + def to_dict(self, *, into: type[dict] = ...) -> dict: + ... + + # error: Incompatible default for argument "into" (default has type "type[ + # dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self"], name="to_dict" + ) + def to_dict( + self, + into: type[MutableMappingT] + | MutableMappingT = dict, # type: ignore[assignment] + ) -> MutableMappingT: + """ + Convert Series to {label -> value} dict or dict-like object. + + Parameters + ---------- + into : class, default dict + The collections.abc.MutableMapping subclass to use as the return + object. Can be the actual class or an empty instance of the mapping + type you want. If you want a collections.defaultdict, you must + pass it initialized. + + Returns + ------- + collections.abc.MutableMapping + Key-value representation of Series. + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + >>> s.to_dict() + {0: 1, 1: 2, 2: 3, 3: 4} + >>> from collections import OrderedDict, defaultdict + >>> s.to_dict(into=OrderedDict) + OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) + >>> dd = defaultdict(list) + >>> s.to_dict(into=dd) + defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) + """ + # GH16122 + into_c = com.standardize_mapping(into) + + if is_object_dtype(self.dtype) or isinstance(self.dtype, ExtensionDtype): + return into_c((k, maybe_box_native(v)) for k, v in self.items()) + else: + # Not an object dtype => all types will be the same so let the default + # indexer return native python type + return into_c(self.items()) + + def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: + """ + Convert Series to DataFrame. + + Parameters + ---------- + name : object, optional + The passed name should substitute for the series name (if it has + one). + + Returns + ------- + DataFrame + DataFrame representation of Series. + + Examples + -------- + >>> s = pd.Series(["a", "b", "c"], + ... name="vals") + >>> s.to_frame() + vals + 0 a + 1 b + 2 c + """ + columns: Index + if name is lib.no_default: + name = self.name + if name is None: + # default to [0], same as we would get with DataFrame(self) + columns = default_index(1) + else: + columns = Index([name]) + else: + columns = Index([name]) + + mgr = self._mgr.to_2d_mgr(columns) + df = self._constructor_expanddim_from_mgr(mgr, axes=mgr.axes) + return df.__finalize__(self, method="to_frame") + + def _set_name( + self, name, inplace: bool = False, deep: bool | None = None + ) -> Series: + """ + Set the Series name. + + Parameters + ---------- + name : str + inplace : bool + Whether to modify `self` directly or return a copy. + deep : bool|None, default None + Whether to do a deep copy, a shallow copy, or Copy on Write (None) + """ + inplace = validate_bool_kwarg(inplace, "inplace") + ser = self if inplace else self.copy(deep and not using_copy_on_write()) + ser.name = name + return ser + + @Appender( + dedent( + """ + Examples + -------- + >>> ser = pd.Series([390., 350., 30., 20.], + ... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], + ...
name="Max Speed") + >>> ser + Falcon 390.0 + Falcon 350.0 + Parrot 30.0 + Parrot 20.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(["a", "b", "a", "b"]).mean() + a 210.0 + b 185.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).mean() + Falcon 370.0 + Parrot 25.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(ser > 100).mean() + Max Speed + False 25.0 + True 370.0 + Name: Max Speed, dtype: float64 + + **Grouping by Indexes** + + We can groupby different levels of a hierarchical index + using the `level` parameter: + + >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], + ... ['Captive', 'Wild', 'Captive', 'Wild']] + >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) + >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") + >>> ser + Animal Type + Falcon Captive 390.0 + Wild 350.0 + Parrot Captive 30.0 + Wild 20.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).mean() + Animal + Falcon 370.0 + Parrot 25.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level="Type").mean() + Type + Captive 210.0 + Wild 185.0 + Name: Max Speed, dtype: float64 + + We can also choose to include `NA` in group keys or not by defining + `dropna` parameter, the default setting is `True`. + + >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) + >>> ser.groupby(level=0).sum() + a 3 + b 3 + dtype: int64 + + >>> ser.groupby(level=0, dropna=False).sum() + a 3 + b 3 + NaN 3 + dtype: int64 + + >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] + >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") + >>> ser.groupby(["a", "b", "a", np.nan]).mean() + a 210.0 + b 350.0 + Name: Max Speed, dtype: float64 + + >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() + a 210.0 + b 350.0 + NaN 20.0 + Name: Max Speed, dtype: float64 + """ + ) + ) + @Appender(_shared_docs["groupby"] % _shared_doc_kwargs) + def groupby( + self, + by=None, + axis: Axis = 0, + level: IndexLabel | None = None, + as_index: bool = True, + sort: bool = True, + group_keys: bool = True, + observed: bool | lib.NoDefault = lib.no_default, + dropna: bool = True, + ) -> SeriesGroupBy: + from pandas.core.groupby.generic import SeriesGroupBy + + if level is None and by is None: + raise TypeError("You have to supply one of 'by' and 'level'") + if not as_index: + raise TypeError("as_index=False only valid with DataFrame") + axis = self._get_axis_number(axis) + + return SeriesGroupBy( + obj=self, + keys=by, + axis=axis, + level=level, + as_index=as_index, + sort=sort, + group_keys=group_keys, + observed=observed, + dropna=dropna, + ) + + # ---------------------------------------------------------------------- + # Statistics, overridden ndarray methods + + # TODO: integrate bottleneck + def count(self) -> int: + """ + Return number of non-NA/null observations in the Series. + + Returns + ------- + int + Number of non-null values in the Series. + + See Also + -------- + DataFrame.count : Count non-NA cells for each column or row. + + Examples + -------- + >>> s = pd.Series([0.0, 1.0, np.nan]) + >>> s.count() + 2 + """ + return notna(self._values).sum().astype("int64") + + def mode(self, dropna: bool = True) -> Series: + """ + Return the mode(s) of the Series. + + The mode is the value that appears most often. There can be multiple modes. + + Always returns Series even if only one value is returned. + + Parameters + ---------- + dropna : bool, default True + Don't consider counts of NaN/NaT. 
+ + Returns + ------- + Series + Modes of the Series in sorted order. + + Examples + -------- + >>> s = pd.Series([2, 4, 2, 2, 4, None]) + >>> s.mode() + 0 2.0 + dtype: float64 + + More than one mode: + + >>> s = pd.Series([2, 4, 8, 2, 4, None]) + >>> s.mode() + 0 2.0 + 1 4.0 + dtype: float64 + + With and without considering null value: + + >>> s = pd.Series([2, 4, None, None, 4, None]) + >>> s.mode(dropna=False) + 0 NaN + dtype: float64 + >>> s = pd.Series([2, 4, None, None, 4, None]) + >>> s.mode() + 0 4.0 + dtype: float64 + """ + # TODO: Add option for bins like value_counts() + values = self._values + if isinstance(values, np.ndarray): + res_values = algorithms.mode(values, dropna=dropna) + else: + res_values = values._mode(dropna=dropna) + + # Ensure index is type stable (should always use int index) + return self._constructor( + res_values, + index=range(len(res_values)), + name=self.name, + copy=False, + dtype=self.dtype, + ).__finalize__(self, method="mode") + + def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation + """ + Return unique values of Series object. + + Uniques are returned in order of appearance. Hash table-based unique, + therefore does NOT sort. + + Returns + ------- + ndarray or ExtensionArray + The unique values returned as a NumPy array. See Notes. + + See Also + -------- + Series.drop_duplicates : Return Series with duplicate values removed. + unique : Top-level unique method for any 1-d array-like object. + Index.unique : Return Index with unique values from an Index object. + + Notes + ----- + Returns the unique values as a NumPy array. In case of an + extension-array backed Series, a new + :class:`~api.extensions.ExtensionArray` of that type with just + the unique values is returned. This includes + + * Categorical + * Period + * Datetime with Timezone + * Datetime without Timezone + * Timedelta + * Interval + * Sparse + * IntegerNA + + See Examples section. + + Examples + -------- + >>> pd.Series([2, 1, 3, 3], name='A').unique() + array([2, 1, 3]) + + >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() + <DatetimeArray> + ['2016-01-01 00:00:00'] + Length: 1, dtype: datetime64[ns] + + >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') + ... for _ in range(3)]).unique() + <DatetimeArray> + ['2016-01-01 00:00:00-05:00'] + Length: 1, dtype: datetime64[ns, US/Eastern] + + A Categorical will return categories in the order of + appearance and with the same dtype. + + >>> pd.Series(pd.Categorical(list('baabc'))).unique() + ['b', 'a', 'c'] + Categories (3, object): ['a', 'b', 'c'] + >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), + ... ordered=True)).unique() + ['b', 'a', 'c'] + Categories (3, object): ['a' < 'b' < 'c'] + """ + return super().unique() + + @overload + def drop_duplicates( + self, + *, + keep: DropKeep = ..., + inplace: Literal[False] = ..., + ignore_index: bool = ..., + ) -> Series: + ... + + @overload + def drop_duplicates( + self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... + ) -> None: + ... + + @overload + def drop_duplicates( + self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... + ) -> Series | None: + ... + + def drop_duplicates( + self, + *, + keep: DropKeep = "first", + inplace: bool = False, + ignore_index: bool = False, + ) -> Series | None: + """ + Return Series with duplicate values removed.
+ + Parameters + ---------- + keep : {'first', 'last', ``False``}, default 'first' + Method to handle dropping duplicates: + + - 'first' : Drop duplicates except for the first occurrence. + - 'last' : Drop duplicates except for the last occurrence. + - ``False`` : Drop all duplicates. + + inplace : bool, default ``False`` + If ``True``, performs operation inplace and returns None. + + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + .. versionadded:: 2.0.0 + + Returns + ------- + Series or None + Series with duplicates dropped or None if ``inplace=True``. + + See Also + -------- + Index.drop_duplicates : Equivalent method on Index. + DataFrame.drop_duplicates : Equivalent method on DataFrame. + Series.duplicated : Related method on Series, indicating duplicate + Series values. + Series.unique : Return unique values as an array. + + Examples + -------- + Generate a Series with duplicated entries. + + >>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama', 'hippo'], + ... name='animal') + >>> s + 0 llama + 1 cow + 2 llama + 3 beetle + 4 llama + 5 hippo + Name: animal, dtype: object + + With the 'keep' parameter, the selection behaviour of duplicated values + can be changed. The value 'first' keeps the first occurrence for each + set of duplicated entries. The default value of keep is 'first'. + + >>> s.drop_duplicates() + 0 llama + 1 cow + 3 beetle + 5 hippo + Name: animal, dtype: object + + The value 'last' for parameter 'keep' keeps the last occurrence for + each set of duplicated entries. + + >>> s.drop_duplicates(keep='last') + 1 cow + 3 beetle + 4 llama + 5 hippo + Name: animal, dtype: object + + The value ``False`` for parameter 'keep' discards all sets of + duplicated entries. + + >>> s.drop_duplicates(keep=False) + 1 cow + 3 beetle + 5 hippo + Name: animal, dtype: object + """ + inplace = validate_bool_kwarg(inplace, "inplace") + result = super().drop_duplicates(keep=keep) + + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + self._update_inplace(result) + return None + else: + return result + + def duplicated(self, keep: DropKeep = "first") -> Series: + """ + Indicate duplicate Series values. + + Duplicated values are indicated as ``True`` values in the resulting + Series. Either all duplicates, all except the first or all except the + last occurrence of duplicates can be indicated. + + Parameters + ---------- + keep : {'first', 'last', False}, default 'first' + Method to handle dropping duplicates: + + - 'first' : Mark duplicates as ``True`` except for the first + occurrence. + - 'last' : Mark duplicates as ``True`` except for the last + occurrence. + - ``False`` : Mark all duplicates as ``True``. + + Returns + ------- + Series[bool] + Series indicating whether each value has occurred in the + preceding values. + + See Also + -------- + Index.duplicated : Equivalent method on pandas.Index. + DataFrame.duplicated : Equivalent method on pandas.DataFrame. + Series.drop_duplicates : Remove duplicate values from Series. 
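Since `drop_duplicates` above keeps exactly the rows that `duplicated` marks `False`, the two methods can be cross-checked against each other. A minimal doctest-style sketch, illustrative only and assuming just `import pandas as pd`:

>>> import pandas as pd
>>> s = pd.Series(['llama', 'cow', 'llama', 'beetle'])
>>> s[~s.duplicated(keep='first')].equals(s.drop_duplicates())
True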
+ + Examples + -------- + By default, for each set of duplicated values, the first occurrence is + set on False and all others on True: + + >>> animals = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama']) + >>> animals.duplicated() + 0 False + 1 False + 2 True + 3 False + 4 True + dtype: bool + + which is equivalent to + + >>> animals.duplicated(keep='first') + 0 False + 1 False + 2 True + 3 False + 4 True + dtype: bool + + By using 'last', the last occurrence of each set of duplicated values + is set on False and all others on True: + + >>> animals.duplicated(keep='last') + 0 True + 1 False + 2 True + 3 False + 4 False + dtype: bool + + By setting keep on ``False``, all duplicates are True: + + >>> animals.duplicated(keep=False) + 0 True + 1 False + 2 True + 3 False + 4 True + dtype: bool + """ + res = self._duplicated(keep=keep) + result = self._constructor(res, index=self.index, copy=False) + return result.__finalize__(self, method="duplicated") + + def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: + """ + Return the row label of the minimum value. + + If multiple values equal the minimum, the first row label with that + value is returned. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + skipna : bool, default True + Exclude NA/null values. If the entire Series is NA, the result + will be NA. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + Index + Label of the minimum value. + + Raises + ------ + ValueError + If the Series is empty. + + See Also + -------- + numpy.argmin : Return indices of the minimum values + along the given axis. + DataFrame.idxmin : Return index of first occurrence of minimum + over requested axis. + Series.idxmax : Return index *label* of the first occurrence + of maximum of values. + + Notes + ----- + This method is the Series version of ``ndarray.argmin``. This method + returns the label of the minimum, while ``ndarray.argmin`` returns + the position. To get the position, use ``series.values.argmin()``. + + Examples + -------- + >>> s = pd.Series(data=[1, None, 4, 1], + ... index=['A', 'B', 'C', 'D']) + >>> s + A 1.0 + B NaN + C 4.0 + D 1.0 + dtype: float64 + + >>> s.idxmin() + 'A' + + If `skipna` is False and there is an NA value in the data, + the function returns ``nan``. + + >>> s.idxmin(skipna=False) + nan + """ + axis = self._get_axis_number(axis) + with warnings.catch_warnings(): + # TODO(3.0): this catching/filtering can be removed + # ignore warning produced by argmin since we will issue a different + # warning for idxmin + warnings.simplefilter("ignore") + i = self.argmin(axis, skipna, *args, **kwargs) + + if i == -1: + # GH#43587 give correct NA value for Index. + warnings.warn( + f"The behavior of {type(self).__name__}.idxmin with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.index._na_value + return self.index[i] + + def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: + """ + Return the row label of the maximum value. + + If multiple values equal the maximum, the first row label with that + value is returned. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + skipna : bool, default True + Exclude NA/null values. 
If the entire Series is NA, the result + will be NA. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + Index + Label of the maximum value. + + Raises + ------ + ValueError + If the Series is empty. + + See Also + -------- + numpy.argmax : Return indices of the maximum values + along the given axis. + DataFrame.idxmax : Return index of first occurrence of maximum + over requested axis. + Series.idxmin : Return index *label* of the first occurrence + of minimum of values. + + Notes + ----- + This method is the Series version of ``ndarray.argmax``. This method + returns the label of the maximum, while ``ndarray.argmax`` returns + the position. To get the position, use ``series.values.argmax()``. + + Examples + -------- + >>> s = pd.Series(data=[1, None, 4, 3, 4], + ... index=['A', 'B', 'C', 'D', 'E']) + >>> s + A 1.0 + B NaN + C 4.0 + D 3.0 + E 4.0 + dtype: float64 + + >>> s.idxmax() + 'C' + + If `skipna` is False and there is an NA value in the data, + the function returns ``nan``. + + >>> s.idxmax(skipna=False) + nan + """ + axis = self._get_axis_number(axis) + with warnings.catch_warnings(): + # TODO(3.0): this catching/filtering can be removed + # ignore warning produced by argmax since we will issue a different + # warning for idxmax + warnings.simplefilter("ignore") + i = self.argmax(axis, skipna, *args, **kwargs) + + if i == -1: + # GH#43587 give correct NA value for Index. + warnings.warn( + f"The behavior of {type(self).__name__}.idxmax with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.index._na_value + return self.index[i] + + def round(self, decimals: int = 0, *args, **kwargs) -> Series: + """ + Round each value in a Series to the given number of decimals. + + Parameters + ---------- + decimals : int, default 0 + Number of decimal places to round to. If decimals is negative, + it specifies the number of positions to the left of the decimal point. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + Series + Rounded values of the Series. + + See Also + -------- + numpy.around : Round values of an np.array. + DataFrame.round : Round values of a DataFrame. + + Examples + -------- + >>> s = pd.Series([0.1, 1.3, 2.7]) + >>> s.round() + 0 0.0 + 1 1.0 + 2 3.0 + dtype: float64 + """ + nv.validate_round(args, kwargs) + new_mgr = self._mgr.round(decimals=decimals, using_cow=using_copy_on_write()) + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__( + self, method="round" + ) + + @overload + def quantile( + self, q: float = ..., interpolation: QuantileInterpolation = ... + ) -> float: + ... + + @overload + def quantile( + self, + q: Sequence[float] | AnyArrayLike, + interpolation: QuantileInterpolation = ..., + ) -> Series: + ... + + @overload + def quantile( + self, + q: float | Sequence[float] | AnyArrayLike = ..., + interpolation: QuantileInterpolation = ..., + ) -> float | Series: + ... + + def quantile( + self, + q: float | Sequence[float] | AnyArrayLike = 0.5, + interpolation: QuantileInterpolation = "linear", + ) -> float | Series: + """ + Return value at the given quantile. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + The quantile(s) to compute, which can lie in range: 0 <= q <= 1.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * (x-i)/(j-i)`, where `(x-i)/(j-i)` is + the fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. + + Returns + ------- + float or Series + If ``q`` is an array, a Series will be returned where the + index is ``q`` and the values are the quantiles, otherwise + a float will be returned. + + See Also + -------- + core.window.Rolling.quantile : Calculate the rolling quantile. + numpy.percentile : Returns the q-th percentile(s) of the array elements. + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + >>> s.quantile(.5) + 2.5 + >>> s.quantile([.25, .5, .75]) + 0.25 1.75 + 0.50 2.50 + 0.75 3.25 + dtype: float64 + """ + validate_percentile(q) + + # We dispatch to DataFrame so that core.internals only has to worry + # about 2D cases. + df = self.to_frame() + + result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) + if result.ndim == 2: + result = result.iloc[:, 0] + + if is_list_like(q): + result.name = self.name + idx = Index(q, dtype=np.float64) + return self._constructor(result, index=idx, name=self.name) + else: + # scalar + return result.iloc[0] + + def corr( + self, + other: Series, + method: CorrelationMethod = "pearson", + min_periods: int | None = None, + ) -> float: + """ + Compute correlation with `other` Series, excluding missing values. + + The two `Series` objects are not required to be the same length and will be + aligned internally before the correlation function is applied. + + Parameters + ---------- + other : Series + Series with which to compute the correlation. + method : {'pearson', 'kendall', 'spearman'} or callable + Method used to compute correlation: + + - pearson : Standard correlation coefficient + - kendall : Kendall Tau correlation coefficient + - spearman : Spearman rank correlation + - callable: Callable with input two 1d ndarrays and returning a float. + + .. warning:: + Note that the returned matrix from corr will have 1 along the + diagonals and will be symmetric regardless of the callable's + behavior. + min_periods : int, optional + Minimum number of observations needed to have a valid result. + + Returns + ------- + float + Correlation with other. + + See Also + -------- + DataFrame.corr : Compute pairwise correlation between columns. + DataFrame.corrwith : Compute pairwise correlation with another + DataFrame or Series. + + Notes + ----- + Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. + + * `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ + * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ + * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ + + Automatic data alignment: as with all pandas operations, automatic data alignment is performed for this method. + ``corr()`` automatically considers values with matching indices. + + Examples + -------- + >>> def histogram_intersection(a, b): + ... v = np.minimum(a, b).sum().round(decimals=1) + ...
return v + >>> s1 = pd.Series([.2, .0, .6, .2]) + >>> s2 = pd.Series([.3, .6, .0, .1]) + >>> s1.corr(s2, method=histogram_intersection) + 0.3 + + Pandas auto-aligns the values with matching indices + + >>> s1 = pd.Series([1, 2, 3], index=[0, 1, 2]) + >>> s2 = pd.Series([1, 2, 3], index=[2, 1, 0]) + >>> s1.corr(s2) + -1.0 + """ # noqa: E501 + this, other = self.align(other, join="inner", copy=False) + if len(this) == 0: + return np.nan + + this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False) + other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False) + + if method in ["pearson", "spearman", "kendall"] or callable(method): + return nanops.nancorr( + this_values, other_values, method=method, min_periods=min_periods + ) + + raise ValueError( + "method must be either 'pearson', " + "'spearman', 'kendall', or a callable, " + f"'{method}' was supplied" + ) + + def cov( + self, + other: Series, + min_periods: int | None = None, + ddof: int | None = 1, + ) -> float: + """ + Compute covariance with Series, excluding missing values. + + The two `Series` objects are not required to be the same length and + will be aligned internally before the covariance is calculated. + + Parameters + ---------- + other : Series + Series with which to compute the covariance. + min_periods : int, optional + Minimum number of observations needed to have a valid result. + ddof : int, default 1 + Delta degrees of freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + + Returns + ------- + float + Covariance between Series and other normalized by N-1 + (unbiased estimator). + + See Also + -------- + DataFrame.cov : Compute pairwise covariance of columns. + + Examples + -------- + >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) + >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) + >>> s1.cov(s2) + -0.01685762652715874 + """ + this, other = self.align(other, join="inner", copy=False) + if len(this) == 0: + return np.nan + this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False) + other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False) + return nanops.nancov( + this_values, other_values, min_periods=min_periods, ddof=ddof + ) + + @doc( + klass="Series", + extra_params="", + other_klass="DataFrame", + examples=dedent( + """ + Difference with previous row + + >>> s = pd.Series([1, 1, 2, 3, 5, 8]) + >>> s.diff() + 0 NaN + 1 0.0 + 2 1.0 + 3 1.0 + 4 2.0 + 5 3.0 + dtype: float64 + + Difference with 3rd previous row + + >>> s.diff(periods=3) + 0 NaN + 1 NaN + 2 NaN + 3 2.0 + 4 4.0 + 5 6.0 + dtype: float64 + + Difference with following row + + >>> s.diff(periods=-1) + 0 0.0 + 1 -1.0 + 2 -1.0 + 3 -2.0 + 4 -3.0 + 5 NaN + dtype: float64 + + Overflow in input dtype + + >>> s = pd.Series([1, 0], dtype=np.uint8) + >>> s.diff() + 0 NaN + 1 255.0 + dtype: float64""" + ), + ) + def diff(self, periods: int = 1) -> Series: + """ + First discrete difference of element. + + Calculates the difference of a {klass} element compared with another + element in the {klass} (default is element in previous row). + + Parameters + ---------- + periods : int, default 1 + Periods to shift for calculating difference, accepts negative + values. + {extra_params} + Returns + ------- + {klass} + First differences of the Series. + + See Also + -------- + {klass}.pct_change: Percent change over given number of periods. + {klass}.shift: Shift index by desired number of periods with an + optional time freq. 
+ {other_klass}.diff: First discrete difference of object. + + Notes + ----- + For boolean dtypes, this uses :meth:`operator.xor` rather than + :meth:`operator.sub`. + The result is calculated according to current dtype in {klass}, + however dtype of the result is always float64. + + Examples + -------- + {examples} + """ + result = algorithms.diff(self._values, periods) + return self._constructor(result, index=self.index, copy=False).__finalize__( + self, method="diff" + ) + + def autocorr(self, lag: int = 1) -> float: + """ + Compute the lag-N autocorrelation. + + This method computes the Pearson correlation between + the Series and its shifted self. + + Parameters + ---------- + lag : int, default 1 + Number of lags to apply before performing autocorrelation. + + Returns + ------- + float + The Pearson correlation between self and self.shift(lag). + + See Also + -------- + Series.corr : Compute the correlation between two Series. + Series.shift : Shift index by desired number of periods. + DataFrame.corr : Compute pairwise correlation of columns. + DataFrame.corrwith : Compute pairwise correlation between rows or + columns of two DataFrame objects. + + Notes + ----- + If the Pearson correlation is not well defined return 'NaN'. + + Examples + -------- + >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) + >>> s.autocorr() # doctest: +ELLIPSIS + 0.10355... + >>> s.autocorr(lag=2) # doctest: +ELLIPSIS + -0.99999... + + If the Pearson correlation is not well defined, then 'NaN' is returned. + + >>> s = pd.Series([1, 0, 0, 0]) + >>> s.autocorr() + nan + """ + return self.corr(cast(Series, self.shift(lag))) + + def dot(self, other: AnyArrayLike) -> Series | np.ndarray: + """ + Compute the dot product between the Series and the columns of other. + + This method computes the dot product between the Series and another + one, or the Series and each columns of a DataFrame, or the Series and + each columns of an array. + + It can also be called using `self @ other`. + + Parameters + ---------- + other : Series, DataFrame or array-like + The other object to compute the dot product with its columns. + + Returns + ------- + scalar, Series or numpy.ndarray + Return the dot product of the Series and other if other is a + Series, the Series of the dot product of Series and each rows of + other if other is a DataFrame or a numpy.ndarray between the Series + and each columns of the numpy array. + + See Also + -------- + DataFrame.dot: Compute the matrix product with the DataFrame. + Series.mul: Multiplication of series and other, element-wise. + + Notes + ----- + The Series and other has to share the same index if other is a Series + or a DataFrame. 
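Because `dot` above checks the index union rather than silently aligning, any label mismatch between the two Series raises instead of introducing NaNs. A doctest-style sketch of the error path, illustrative only and assuming just `import pandas as pd`:

>>> import pandas as pd
>>> a = pd.Series([1, 2], index=['x', 'y'])
>>> b = pd.Series([3, 4], index=['x', 'z'])
>>> a.dot(b)
Traceback (most recent call last):
...
ValueError: matrices are not aligned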
+ + Examples + -------- + >>> s = pd.Series([0, 1, 2, 3]) + >>> other = pd.Series([-1, 2, -3, 4]) + >>> s.dot(other) + 8 + >>> s @ other + 8 + >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) + >>> s.dot(df) + 0 24 + 1 14 + dtype: int64 + >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) + >>> s.dot(arr) + array([24, 14]) + """ + if isinstance(other, (Series, ABCDataFrame)): + common = self.index.union(other.index) + if len(common) > len(self.index) or len(common) > len(other.index): + raise ValueError("matrices are not aligned") + + left = self.reindex(index=common, copy=False) + right = other.reindex(index=common, copy=False) + lvals = left.values + rvals = right.values + else: + lvals = self.values + rvals = np.asarray(other) + if lvals.shape[0] != rvals.shape[0]: + raise Exception( + f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" + ) + + if isinstance(other, ABCDataFrame): + return self._constructor( + np.dot(lvals, rvals), index=other.columns, copy=False + ).__finalize__(self, method="dot") + elif isinstance(other, Series): + return np.dot(lvals, rvals) + elif isinstance(rvals, np.ndarray): + return np.dot(lvals, rvals) + else: # pragma: no cover + raise TypeError(f"unsupported type: {type(other)}") + + def __matmul__(self, other): + """ + Matrix multiplication using binary `@` operator. + """ + return self.dot(other) + + def __rmatmul__(self, other): + """ + Matrix multiplication using binary `@` operator. + """ + return self.dot(np.transpose(other)) + + @doc(base.IndexOpsMixin.searchsorted, klass="Series") + # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" + def searchsorted( # type: ignore[override] + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) + + # ------------------------------------------------------------------- + # Combination + + def _append( + self, to_append, ignore_index: bool = False, verify_integrity: bool = False + ): + from pandas.core.reshape.concat import concat + + if isinstance(to_append, (list, tuple)): + to_concat = [self] + to_concat.extend(to_append) + else: + to_concat = [self, to_append] + if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): + msg = "to_append should be a Series or list/tuple of Series, got DataFrame" + raise TypeError(msg) + return concat( + to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity + ) + + @doc( + _shared_docs["compare"], + dedent( + """ + Returns + ------- + Series or DataFrame + If axis is 0 or 'index' the result will be a Series. + The resulting index will be a MultiIndex with 'self' and 'other' + stacked alternately at the inner level. + + If axis is 1 or 'columns' the result will be a DataFrame. + It will have two columns namely 'self' and 'other'. + + See Also + -------- + DataFrame.compare : Compare with another DataFrame and show differences. + + Notes + ----- + Matching NaNs will not appear as a difference. 
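The note above that matching NaNs do not appear as a difference is easy to sanity-check: NaN-vs-NaN positions are treated as equal and drop out of the result. A hedged doctest-style sketch, assuming only `numpy`/`pandas` imports:

>>> import numpy as np
>>> import pandas as pd
>>> s1 = pd.Series([1.0, np.nan])
>>> s2 = pd.Series([2.0, np.nan])
>>> s1.compare(s2)
   self  other
0   1.0    2.0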
+ + Examples + -------- + >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) + >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) + + Align the differences on columns + + >>> s1.compare(s2) + self other + 1 b a + 3 d b + + Stack the differences on indices + + >>> s1.compare(s2, align_axis=0) + 1 self b + other a + 3 self d + other b + dtype: object + + Keep all original rows + + >>> s1.compare(s2, keep_shape=True) + self other + 0 NaN NaN + 1 b a + 2 NaN NaN + 3 d b + 4 NaN NaN + + Keep all original rows and also all original values + + >>> s1.compare(s2, keep_shape=True, keep_equal=True) + self other + 0 a a + 1 b a + 2 c c + 3 d b + 4 e e + """ + ), + klass=_shared_doc_kwargs["klass"], + ) + def compare( + self, + other: Series, + align_axis: Axis = 1, + keep_shape: bool = False, + keep_equal: bool = False, + result_names: Suffixes = ("self", "other"), + ) -> DataFrame | Series: + return super().compare( + other=other, + align_axis=align_axis, + keep_shape=keep_shape, + keep_equal=keep_equal, + result_names=result_names, + ) + + def combine( + self, + other: Series | Hashable, + func: Callable[[Hashable, Hashable], Hashable], + fill_value: Hashable | None = None, + ) -> Series: + """ + Combine the Series with a Series or scalar according to `func`. + + Combine the Series and `other` using `func` to perform elementwise + selection for combined Series. + `fill_value` is assumed when value is missing at some index + from one of the two objects being combined. + + Parameters + ---------- + other : Series or scalar + The value(s) to be combined with the `Series`. + func : function + Function that takes two scalars as inputs and returns an element. + fill_value : scalar, optional + The value to assume when an index is missing from + one Series or the other. The default specifies to use the + appropriate NaN value for the underlying dtype of the Series. + + Returns + ------- + Series + The result of combining the Series with the other object. + + See Also + -------- + Series.combine_first : Combine Series values, choosing the calling + Series' values first. + + Examples + -------- + Consider 2 Datasets ``s1`` and ``s2`` containing + highest clocked speeds of different birds. + + >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) + >>> s1 + falcon 330.0 + eagle 160.0 + dtype: float64 + >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) + >>> s2 + falcon 345.0 + eagle 200.0 + duck 30.0 + dtype: float64 + + Now, to combine the two datasets and view the highest speeds + of the birds across the two datasets + + >>> s1.combine(s2, max) + duck NaN + eagle 200.0 + falcon 345.0 + dtype: float64 + + In the previous example, the resulting value for duck is missing, + because the maximum of a NaN and a float is a NaN. + So, in the example, we set ``fill_value=0``, + so the maximum value returned will be the value from some dataset. 
+ + >>> s1.combine(s2, max, fill_value=0) + duck 30.0 + eagle 200.0 + falcon 345.0 + dtype: float64 + """ + if fill_value is None: + fill_value = na_value_for_dtype(self.dtype, compat=False) + + if isinstance(other, Series): + # If other is a Series, result is based on union of Series, + # so do this element by element + new_index = self.index.union(other.index) + new_name = ops.get_op_result_name(self, other) + new_values = np.empty(len(new_index), dtype=object) + with np.errstate(all="ignore"): + for i, idx in enumerate(new_index): + lv = self.get(idx, fill_value) + rv = other.get(idx, fill_value) + new_values[i] = func(lv, rv) + else: + # Assume that other is a scalar, so apply the function for + # each element in the Series + new_index = self.index + new_values = np.empty(len(new_index), dtype=object) + with np.errstate(all="ignore"): + new_values[:] = [func(lv, other) for lv in self._values] + new_name = self.name + + # try_float=False is to match agg_series + npvalues = lib.maybe_convert_objects(new_values, try_float=False) + # same_dtype here is a kludge to avoid casting e.g. [True, False] to + # ["True", "False"] + same_dtype = isinstance(self.dtype, (StringDtype, CategoricalDtype)) + res_values = maybe_cast_pointwise_result( + npvalues, self.dtype, same_dtype=same_dtype + ) + return self._constructor(res_values, index=new_index, name=new_name, copy=False) + + def combine_first(self, other) -> Series: + """ + Update null elements with value in the same location in 'other'. + + Combine two Series objects by filling null values in one Series with + non-null values from the other Series. Result index will be the union + of the two indexes. + + Parameters + ---------- + other : Series + The value(s) to be used for filling null values. + + Returns + ------- + Series + The result of combining the provided Series with the other object. + + See Also + -------- + Series.combine : Perform element-wise operation on two Series + using a given function. + + Examples + -------- + >>> s1 = pd.Series([1, np.nan]) + >>> s2 = pd.Series([3, 4, 5]) + >>> s1.combine_first(s2) + 0 1.0 + 1 4.0 + 2 5.0 + dtype: float64 + + Null values still persist if the location of that null value + does not exist in `other` + + >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) + >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) + >>> s1.combine_first(s2) + duck 30.0 + eagle 160.0 + falcon NaN + dtype: float64 + """ + from pandas.core.reshape.concat import concat + + if self.dtype == other.dtype: + if self.index.equals(other.index): + return self.mask(self.isna(), other) + elif self._can_hold_na and not isinstance(self.dtype, SparseDtype): + this, other = self.align(other, join="outer") + return this.mask(this.isna(), other) + + new_index = self.index.union(other.index) + + this = self + # identify the index subset to keep for each series + keep_other = other.index.difference(this.index[notna(this)]) + keep_this = this.index.difference(keep_other) + + this = this.reindex(keep_this, copy=False) + other = other.reindex(keep_other, copy=False) + + if this.dtype.kind == "M" and other.dtype.kind != "M": + other = to_datetime(other) + combined = concat([this, other]) + combined = combined.reindex(new_index, copy=False) + return combined.__finalize__(self, method="combine_first") + + def update(self, other: Series | Sequence | Mapping) -> None: + """ + Modify Series in place using values from passed Series. + + Uses non-NA values from passed Series to make updates. Aligns + on index. 
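One more note on the `combine_first` general path above: it takes the union of the two indexes and fills each null slot from `other`, so labels missing from the caller are appended rather than dropped. A small doctest-style sketch, illustrative only and assuming just `numpy`/`pandas` imports:

>>> import numpy as np
>>> import pandas as pd
>>> s1 = pd.Series([np.nan, 2.0], index=['a', 'b'])
>>> s2 = pd.Series([10.0, 30.0], index=['a', 'c'])
>>> s1.combine_first(s2)
a    10.0
b     2.0
c    30.0
dtype: float64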
+ + Parameters + ---------- + other : Series, or object coercible into Series + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.update(pd.Series([4, 5, 6])) + >>> s + 0 4 + 1 5 + 2 6 + dtype: int64 + + >>> s = pd.Series(['a', 'b', 'c']) + >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) + >>> s + 0 d + 1 b + 2 e + dtype: object + + >>> s = pd.Series([1, 2, 3]) + >>> s.update(pd.Series([4, 5, 6, 7, 8])) + >>> s + 0 4 + 1 5 + 2 6 + dtype: int64 + + If ``other`` contains NaNs the corresponding values are not updated + in the original Series. + + >>> s = pd.Series([1, 2, 3]) + >>> s.update(pd.Series([4, np.nan, 6])) + >>> s + 0 4 + 1 2 + 2 6 + dtype: int64 + + ``other`` can also be a non-Series object type + that is coercible into a Series + + >>> s = pd.Series([1, 2, 3]) + >>> s.update([4, np.nan, 6]) + >>> s + 0 4 + 1 2 + 2 6 + dtype: int64 + + >>> s = pd.Series([1, 2, 3]) + >>> s.update({1: 9}) + >>> s + 0 1 + 1 9 + 2 3 + dtype: int64 + """ + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + elif not PYPY and not using_copy_on_write() and self._is_view_after_cow_rules(): + ctr = sys.getrefcount(self) + ref_count = REF_COUNT + if _check_cacher(self): + # see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221 + ref_count += 1 + if ctr <= ref_count: + warnings.warn( + _chained_assignment_warning_method_msg, + FutureWarning, + stacklevel=2, + ) + + if not isinstance(other, Series): + other = Series(other) + + other = other.reindex_like(self) + mask = notna(other) + + self._mgr = self._mgr.putmask(mask=mask, new=other) + self._maybe_update_cacher() + + # ---------------------------------------------------------------------- + # Reindexing, sorting + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> Series: + ... + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool | Sequence[bool] = ..., + inplace: bool = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> Series | None: + ... + + def sort_values( + self, + *, + axis: Axis = 0, + ascending: bool | Sequence[bool] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + ignore_index: bool = False, + key: ValueKeyFunc | None = None, + ) -> Series | None: + """ + Sort by the values. + + Sort a Series in ascending or descending order by some + criterion. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + ascending : bool or list of bools, default True + If True, sort values in ascending order, otherwise descending. + inplace : bool, default False + If True, perform operation in-place. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. 'mergesort' and 'stable' are the only stable algorithms. 
+ na_position : {'first' or 'last'}, default 'last' + Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at + the end. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + If not None, apply the key function to the series values + before sorting. This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect a + ``Series`` and return an array-like. + + Returns + ------- + Series or None + Series ordered by values or None if ``inplace=True``. + + See Also + -------- + Series.sort_index : Sort by the Series indices. + DataFrame.sort_values : Sort DataFrame by the values along either axis. + DataFrame.sort_index : Sort DataFrame by indices. + + Examples + -------- + >>> s = pd.Series([np.nan, 1, 3, 10, 5]) + >>> s + 0 NaN + 1 1.0 + 2 3.0 + 3 10.0 + 4 5.0 + dtype: float64 + + Sort values ascending order (default behaviour) + + >>> s.sort_values(ascending=True) + 1 1.0 + 2 3.0 + 4 5.0 + 3 10.0 + 0 NaN + dtype: float64 + + Sort values descending order + + >>> s.sort_values(ascending=False) + 3 10.0 + 4 5.0 + 2 3.0 + 1 1.0 + 0 NaN + dtype: float64 + + Sort values putting NAs first + + >>> s.sort_values(na_position='first') + 0 NaN + 1 1.0 + 2 3.0 + 4 5.0 + 3 10.0 + dtype: float64 + + Sort a series of strings + + >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) + >>> s + 0 z + 1 b + 2 d + 3 a + 4 c + dtype: object + + >>> s.sort_values() + 3 a + 1 b + 4 c + 2 d + 0 z + dtype: object + + Sort using a key function. Your `key` function will be + given the ``Series`` of values and should return an array-like. + + >>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) + >>> s.sort_values() + 1 B + 3 D + 0 a + 2 c + 4 e + dtype: object + >>> s.sort_values(key=lambda x: x.str.lower()) + 0 a + 1 B + 2 c + 3 D + 4 e + dtype: object + + NumPy ufuncs work well here. For example, we can + sort by the ``sin`` of the value + + >>> s = pd.Series([-4, -2, 0, 2, 4]) + >>> s.sort_values(key=np.sin) + 1 -2 + 4 4 + 2 0 + 0 -4 + 3 2 + dtype: int64 + + More complicated user-defined functions can be used, + as long as they expect a Series and return an array-like + + >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) + 0 -4 + 3 2 + 4 4 + 1 -2 + 2 0 + dtype: int64 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + # Validate the axis parameter + self._get_axis_number(axis) + + # GH 5856/5853 + if inplace and self._is_cached: + raise ValueError( + "This Series is a view of some other array, to " + "sort in-place you must create a copy" + ) + + if is_list_like(ascending): + ascending = cast(Sequence[bool], ascending) + if len(ascending) != 1: + raise ValueError( + f"Length of ascending ({len(ascending)}) must be 1 for Series" + ) + ascending = ascending[0] + + ascending = validate_ascending(ascending) + + if na_position not in ["first", "last"]: + raise ValueError(f"invalid na_position: {na_position}") + + # GH 35922. 
Make sorting stable by leveraging nargsort + if key: + values_to_sort = cast(Series, ensure_key_mapped(self, key))._values + else: + values_to_sort = self._values + sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) + + if is_range_indexer(sorted_index, len(sorted_index)): + if inplace: + return self._update_inplace(self) + return self.copy(deep=None) + + result = self._constructor( + self._values[sorted_index], index=self.index[sorted_index], copy=False + ) + + if ignore_index: + result.index = default_index(len(sorted_index)) + + if not inplace: + return result.__finalize__(self, method="sort_values") + self._update_inplace(result) + return None + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> Series: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: bool = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> Series | None: + ... + + def sort_index( + self, + *, + axis: Axis = 0, + level: IndexLabel | None = None, + ascending: bool | Sequence[bool] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + sort_remaining: bool = True, + ignore_index: bool = False, + key: IndexKeyFunc | None = None, + ) -> Series | None: + """ + Sort Series by index labels. + + Returns a new Series sorted by label if `inplace` argument is + ``False``, otherwise updates the original series and returns None. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + level : int, optional + If not None, sort on values in specified index level(s). + ascending : bool or list-like of bools, default True + Sort ascending vs. descending. When the index is a MultiIndex the + sort direction can be controlled for each level individually. + inplace : bool, default False + If True, perform operation in-place. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. 'mergesort' and 'stable' are the only stable algorithms. For + DataFrames, this option is only applied when sorting on a single + column or label. + na_position : {'first', 'last'}, default 'last' + If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. + Not implemented for MultiIndex. + sort_remaining : bool, default True + If True and sorting by level and index is multilevel, sort by other + levels too (in order) after sorting by specified level. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + If not None, apply the key function to the index values + before sorting. 
This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect an + ``Index`` and return an ``Index`` of the same shape. + + Returns + ------- + Series or None + The original Series sorted by the labels or None if ``inplace=True``. + + See Also + -------- + DataFrame.sort_index: Sort DataFrame by the index. + DataFrame.sort_values: Sort DataFrame by the value. + Series.sort_values : Sort Series by the value. + + Examples + -------- + >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) + >>> s.sort_index() + 1 c + 2 b + 3 a + 4 d + dtype: object + + Sort Descending + + >>> s.sort_index(ascending=False) + 4 d + 3 a + 2 b + 1 c + dtype: object + + By default NaNs are put at the end, but use `na_position` to place + them at the beginning + + >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) + >>> s.sort_index(na_position='first') + NaN d + 1.0 c + 2.0 b + 3.0 a + dtype: object + + Specify index level to sort + + >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', + ... 'baz', 'baz', 'bar', 'bar']), + ... np.array(['two', 'one', 'two', 'one', + ... 'two', 'one', 'two', 'one'])] + >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) + >>> s.sort_index(level=1) + bar one 8 + baz one 6 + foo one 4 + qux one 2 + bar two 7 + baz two 5 + foo two 3 + qux two 1 + dtype: int64 + + Does not sort by remaining levels when sorting by levels + + >>> s.sort_index(level=1, sort_remaining=False) + qux one 2 + foo one 4 + baz one 6 + bar one 8 + qux two 1 + foo two 3 + baz two 5 + bar two 7 + dtype: int64 + + Apply a key function before sorting + + >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) + >>> s.sort_index(key=lambda x : x.str.lower()) + A 1 + b 2 + C 3 + d 4 + dtype: int64 + """ + + return super().sort_index( + axis=axis, + level=level, + ascending=ascending, + inplace=inplace, + kind=kind, + na_position=na_position, + sort_remaining=sort_remaining, + ignore_index=ignore_index, + key=key, + ) + + def argsort( + self, + axis: Axis = 0, + kind: SortKind = "quicksort", + order: None = None, + stable: None = None, + ) -> Series: + """ + Return the integer indices that would sort the Series values. + + Override ndarray.argsort. Argsorts the value, omitting NA/null values, + and places the result in the same locations as the non-NA values. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See :func:`numpy.sort` for more + information. 'mergesort' and 'stable' are the only stable algorithms. + order : None + Has no effect but is accepted for compatibility with numpy. + stable : None + Has no effect but is accepted for compatibility with numpy. + + Returns + ------- + Series[np.intp] + Positions of values within the sort order with -1 indicating + nan values. + + See Also + -------- + numpy.ndarray.argsort : Returns the indices that would sort this array. 
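For NA-free input, the positions returned by `argsort` above compose with `iloc` to reproduce `sort_values`. An illustrative doctest-style sketch, not part of the patch, assuming just `import pandas as pd`:

>>> import pandas as pd
>>> s = pd.Series([3, 1, 2], index=['a', 'b', 'c'])
>>> s.iloc[s.argsort()].equals(s.sort_values())
True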
+ + Examples + -------- + >>> s = pd.Series([3, 2, 1]) + >>> s.argsort() + 0 2 + 1 1 + 2 0 + dtype: int64 + """ + if axis != -1: + # GH#54257 We allow -1 here so that np.argsort(series) works + self._get_axis_number(axis) + + values = self._values + mask = isna(values) + + if mask.any(): + # TODO(3.0): once this deprecation is enforced we can call + # self.array.argsort directly, which will close GH#43840 and + # GH#12694 + warnings.warn( + "The behavior of Series.argsort in the presence of NA values is " + "deprecated. In a future version, NA values will be ordered " + "last instead of set to -1.", + FutureWarning, + stacklevel=find_stack_level(), + ) + result = np.full(len(self), -1, dtype=np.intp) + notmask = ~mask + result[notmask] = np.argsort(values[notmask], kind=kind) + else: + result = np.argsort(values, kind=kind) + + res = self._constructor( + result, index=self.index, name=self.name, dtype=np.intp, copy=False + ) + return res.__finalize__(self, method="argsort") + + def nlargest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + """ + Return the largest `n` elements. + + Parameters + ---------- + n : int, default 5 + Return this many descending sorted values. + keep : {'first', 'last', 'all'}, default 'first' + When there are duplicate values that cannot all fit in a + Series of `n` elements: + + - ``first`` : return the first `n` occurrences in order + of appearance. + - ``last`` : return the last `n` occurrences in reverse + order of appearance. + - ``all`` : keep all occurrences. This can result in a Series of + size larger than `n`. + + Returns + ------- + Series + The `n` largest values in the Series, sorted in decreasing order. + + See Also + -------- + Series.nsmallest: Get the `n` smallest elements. + Series.sort_values: Sort Series by values. + Series.head: Return the first `n` rows. + + Notes + ----- + Faster than ``.sort_values(ascending=False).head(n)`` for small `n` + relative to the size of the ``Series`` object. + + Examples + -------- + >>> countries_population = {"Italy": 59000000, "France": 65000000, + ... "Malta": 434000, "Maldives": 434000, + ... "Brunei": 434000, "Iceland": 337000, + ... "Nauru": 11300, "Tuvalu": 11300, + ... "Anguilla": 11300, "Montserrat": 5200} + >>> s = pd.Series(countries_population) + >>> s + Italy 59000000 + France 65000000 + Malta 434000 + Maldives 434000 + Brunei 434000 + Iceland 337000 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + Montserrat 5200 + dtype: int64 + + The `n` largest elements where ``n=5`` by default. + + >>> s.nlargest() + France 65000000 + Italy 59000000 + Malta 434000 + Maldives 434000 + Brunei 434000 + dtype: int64 + + The `n` largest elements where ``n=3``. Default `keep` value is 'first' + so Malta will be kept. + + >>> s.nlargest(3) + France 65000000 + Italy 59000000 + Malta 434000 + dtype: int64 + + The `n` largest elements where ``n=3`` and keeping the last duplicates. + Brunei will be kept since it is the last with value 434000 based on + the index order. + + >>> s.nlargest(3, keep='last') + France 65000000 + Italy 59000000 + Brunei 434000 + dtype: int64 + + The `n` largest elements where ``n=3`` with all duplicates kept. Note + that the returned Series has five elements due to the three duplicates. 
+ + >>> s.nlargest(3, keep='all') + France 65000000 + Italy 59000000 + Malta 434000 + Maldives 434000 + Brunei 434000 + dtype: int64 + """ + return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() + + def nsmallest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + """ + Return the smallest `n` elements. + + Parameters + ---------- + n : int, default 5 + Return this many ascending sorted values. + keep : {'first', 'last', 'all'}, default 'first' + When there are duplicate values that cannot all fit in a + Series of `n` elements: + + - ``first`` : return the first `n` occurrences in order + of appearance. + - ``last`` : return the last `n` occurrences in reverse + order of appearance. + - ``all`` : keep all occurrences. This can result in a Series of + size larger than `n`. + + Returns + ------- + Series + The `n` smallest values in the Series, sorted in increasing order. + + See Also + -------- + Series.nlargest: Get the `n` largest elements. + Series.sort_values: Sort Series by values. + Series.head: Return the first `n` rows. + + Notes + ----- + Faster than ``.sort_values().head(n)`` for small `n` relative to + the size of the ``Series`` object. + + Examples + -------- + >>> countries_population = {"Italy": 59000000, "France": 65000000, + ... "Brunei": 434000, "Malta": 434000, + ... "Maldives": 434000, "Iceland": 337000, + ... "Nauru": 11300, "Tuvalu": 11300, + ... "Anguilla": 11300, "Montserrat": 5200} + >>> s = pd.Series(countries_population) + >>> s + Italy 59000000 + France 65000000 + Brunei 434000 + Malta 434000 + Maldives 434000 + Iceland 337000 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + Montserrat 5200 + dtype: int64 + + The `n` smallest elements where ``n=5`` by default. + + >>> s.nsmallest() + Montserrat 5200 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + Iceland 337000 + dtype: int64 + + The `n` smallest elements where ``n=3``. Default `keep` value is + 'first' so Nauru and Tuvalu will be kept. + + >>> s.nsmallest(3) + Montserrat 5200 + Nauru 11300 + Tuvalu 11300 + dtype: int64 + + The `n` smallest elements where ``n=3`` and keeping the last + duplicates. Anguilla and Tuvalu will be kept since they are the last + with value 11300 based on the index order. + + >>> s.nsmallest(3, keep='last') + Montserrat 5200 + Anguilla 11300 + Tuvalu 11300 + dtype: int64 + + The `n` smallest elements where ``n=3`` with all duplicates kept. Note + that the returned Series has four elements due to the three duplicates. + + >>> s.nsmallest(3, keep='all') + Montserrat 5200 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + dtype: int64 + """ + return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() + + @doc( + klass=_shared_doc_kwargs["klass"], + extra_params=dedent( + """copy : bool, default True + Whether to copy underlying data. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True``""" + ), + examples=dedent( + """\ + Examples + -------- + >>> s = pd.Series( + ... ["A", "B", "A", "C"], + ... index=[ + ... ["Final exam", "Final exam", "Coursework", "Coursework"], + ... ["History", "Geography", "History", "Geography"], + ...
["January", "February", "March", "April"], + ... ], + ... ) + >>> s + Final exam History January A + Geography February B + Coursework History March A + Geography April C + dtype: object + + In the following example, we will swap the levels of the indices. + Here, we will swap the levels column-wise, but levels can be swapped row-wise + in a similar manner. Note that column-wise is the default behaviour. + By not supplying any arguments for i and j, we swap the last and second to + last indices. + + >>> s.swaplevel() + Final exam January History A + February Geography B + Coursework March History A + April Geography C + dtype: object + + By supplying one argument, we can choose which index to swap the last + index with. We can for example swap the first index with the last one as + follows. + + >>> s.swaplevel(0) + January History Final exam A + February Geography Final exam B + March History Coursework A + April Geography Coursework C + dtype: object + + We can also define explicitly which indices we want to swap by supplying values + for both i and j. Here, we for example swap the first and second indices. + + >>> s.swaplevel(0, 1) + History Final exam January A + Geography Final exam February B + History Coursework March A + Geography Coursework April C + dtype: object""" + ), + ) + def swaplevel( + self, i: Level = -2, j: Level = -1, copy: bool | None = None + ) -> Series: + """ + Swap levels i and j in a :class:`MultiIndex`. + + Default is to swap the two innermost levels of the index. + + Parameters + ---------- + i, j : int or str + Levels of the indices to be swapped. Can pass level name as string. + {extra_params} + + Returns + ------- + {klass} + {klass} with levels swapped in MultiIndex. + + {examples} + """ + assert isinstance(self.index, MultiIndex) + result = self.copy(deep=copy and not using_copy_on_write()) + result.index = self.index.swaplevel(i, j) + return result + + def reorder_levels(self, order: Sequence[Level]) -> Series: + """ + Rearrange index levels using input order. + + May not drop or duplicate levels. + + Parameters + ---------- + order : list of int representing new level order + Reference level by number or key. + + Returns + ------- + type of caller (new object) + + Examples + -------- + >>> arrays = [np.array(["dog", "dog", "cat", "cat", "bird", "bird"]), + ... np.array(["white", "black", "white", "black", "white", "black"])] + >>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays) + >>> s + dog white 1 + black 2 + cat white 3 + black 3 + bird white 5 + black 2 + dtype: int64 + >>> s.reorder_levels([1, 0]) + white dog 1 + black dog 2 + white cat 3 + black cat 3 + white bird 5 + black bird 2 + dtype: int64 + """ + if not isinstance(self.index, MultiIndex): # pragma: no cover + raise Exception("Can only reorder levels on a hierarchical axis.") + + result = self.copy(deep=None) + assert isinstance(result.index, MultiIndex) + result.index = result.index.reorder_levels(order) + return result + + def explode(self, ignore_index: bool = False) -> Series: + """ + Transform each element of a list-like to a row. + + Parameters + ---------- + ignore_index : bool, default False + If True, the resulting index will be labeled 0, 1, …, n - 1. + + Returns + ------- + Series + Exploded lists to rows; index will be duplicated for these rows. + + See Also + -------- + Series.str.split : Split string values on specified separator. + Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex + to produce DataFrame. 
+ + DataFrame.melt : Unpivot a DataFrame from wide format to long format. + DataFrame.explode : Explode a DataFrame from list-like + columns to long format. + + Notes + ----- + This routine will explode list-likes including lists, tuples, sets, + Series, and np.ndarray. The result dtype of the subset rows will + be object. Scalars will be returned unchanged, and empty list-likes will + result in a np.nan for that row. In addition, the ordering of elements in + the output will be non-deterministic when exploding sets. + + Reference :ref:`the user guide <reshaping.explode>` for more examples. + + Examples + -------- + >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) + >>> s + 0 [1, 2, 3] + 1 foo + 2 [] + 3 [3, 4] + dtype: object + + >>> s.explode() + 0 1 + 0 2 + 0 3 + 1 foo + 2 NaN + 3 3 + 3 4 + dtype: object + """ + if isinstance(self.dtype, ExtensionDtype): + values, counts = self._values._explode() + elif len(self) and is_object_dtype(self.dtype): + values, counts = reshape.explode(np.asarray(self._values)) + else: + result = self.copy() + return result.reset_index(drop=True) if ignore_index else result + + if ignore_index: + index: Index = default_index(len(values)) + else: + index = self.index.repeat(counts) + + return self._constructor(values, index=index, name=self.name, copy=False) + + def unstack( + self, + level: IndexLabel = -1, + fill_value: Hashable | None = None, + sort: bool = True, + ) -> DataFrame: + """ + Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. + + Parameters + ---------- + level : int, str, or list of these, default last level + Level(s) to unstack, can pass level name. + fill_value : scalar value, default None + Value to use when replacing NaN values. + sort : bool, default True + Sort the level(s) in the resulting MultiIndex columns. + + Returns + ------- + DataFrame + Unstacked Series. + + Notes + ----- + Reference :ref:`the user guide <reshaping.stacking>` for more examples. + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4], + ... index=pd.MultiIndex.from_product([['one', 'two'], + ... ['a', 'b']])) + >>> s + one a 1 + b 2 + two a 3 + b 4 + dtype: int64 + + >>> s.unstack(level=-1) + a b + one 1 2 + two 3 4 + + >>> s.unstack(level=0) + one two + a 1 3 + b 2 4 + """ + from pandas.core.reshape.reshape import unstack + + return unstack(self, level, fill_value, sort) + + # ---------------------------------------------------------------------- + # function application + + def map( + self, + arg: Callable | Mapping | Series, + na_action: Literal["ignore"] | None = None, + ) -> Series: + """ + Map values of Series according to an input mapping or function. + + Used for substituting each value in a Series with another value, + that may be derived from a function, a ``dict`` or + a :class:`Series`. + + Parameters + ---------- + arg : function, collections.abc.Mapping subclass or Series + Mapping correspondence. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NaN values, without passing them to the + mapping correspondence. + + Returns + ------- + Series + Same index as caller. + + See Also + -------- + Series.apply : For applying more complex functions on a Series. + Series.replace: Replace values given in `to_replace` with `value`. + DataFrame.apply : Apply a function row-/column-wise. + DataFrame.map : Apply a function elementwise on a whole DataFrame. + + Notes + ----- + When ``arg`` is a dictionary, values in Series that are not in the + dictionary (as keys) are converted to ``NaN``.
However, if the + dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. + provides a method for default values), then this default is used + rather than ``NaN``. + + Examples + -------- + >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) + >>> s + 0 cat + 1 dog + 2 NaN + 3 rabbit + dtype: object + + ``map`` accepts a ``dict`` or a ``Series``. Values that are not found + in the ``dict`` are converted to ``NaN``, unless the dict has a default + value (e.g. ``defaultdict``): + + >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) + 0 kitten + 1 puppy + 2 NaN + 3 NaN + dtype: object + + It also accepts a function: + + >>> s.map('I am a {}'.format) + 0 I am a cat + 1 I am a dog + 2 I am a nan + 3 I am a rabbit + dtype: object + + To avoid applying the function to missing values (and keep them as + ``NaN``) ``na_action='ignore'`` can be used: + + >>> s.map('I am a {}'.format, na_action='ignore') + 0 I am a cat + 1 I am a dog + 2 NaN + 3 I am a rabbit + dtype: object + """ + new_values = self._map_values(arg, na_action=na_action) + return self._constructor(new_values, index=self.index, copy=False).__finalize__( + self, method="map" + ) + + def _gotitem(self, key, ndim, subset=None) -> Self: + """ + Sub-classes to define. Return a sliced object. + + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + Requested ndim of result. + subset : object, default None + Subset to act on. + """ + return self + + _agg_see_also_doc = dedent( + """ + See Also + -------- + Series.apply : Invoke function on a Series. + Series.transform : Transform function producing a Series with like indexes. + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + + >>> s.agg('min') + 1 + + >>> s.agg(['min', 'max']) + min 1 + max 4 + dtype: int64 + """ + ) + + @doc( + _shared_docs["aggregate"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + ) + def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): + # Validate the axis parameter + self._get_axis_number(axis) + + # if func is None, will switch to user-provided "named aggregation" kwargs + if func is None: + func = dict(kwargs.items()) + + op = SeriesApply(self, func, args=args, kwargs=kwargs) + result = op.agg() + return result + + agg = aggregate + + @doc( + _shared_docs["transform"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + ) + def transform( + self, func: AggFuncType, axis: Axis = 0, *args, **kwargs + ) -> DataFrame | Series: + # Validate axis argument + self._get_axis_number(axis) + ser = ( + self.copy(deep=False) + if using_copy_on_write() or warn_copy_on_write() + else self + ) + result = SeriesApply(ser, func=func, args=args, kwargs=kwargs).transform() + return result + + def apply( + self, + func: AggFuncType, + convert_dtype: bool | lib.NoDefault = lib.no_default, + args: tuple[Any, ...] = (), + *, + by_row: Literal[False, "compat"] = "compat", + **kwargs, + ) -> DataFrame | Series: + """ + Invoke function on values of Series. + + Can be ufunc (a NumPy function that applies to the entire Series) + or a Python function that only works on single values. + + Parameters + ---------- + func : function + Python function or NumPy ufunc to apply. + convert_dtype : bool, default True + Try to find better dtype for elementwise function results. If + False, leave as dtype=object. 
Note that the dtype is always + preserved for some extension array dtypes, such as Categorical. + + .. deprecated:: 2.1.0 + ``convert_dtype`` has been deprecated. Do ``ser.astype(object).apply()`` + instead if you want ``convert_dtype=False``. + args : tuple + Positional arguments passed to func after the series value. + by_row : False or "compat", default "compat" + If ``"compat"`` and func is a callable, func will be passed each element of + the Series, like ``Series.map``. If func is a list or dict of + callables, will first try to translate each func into pandas methods. If + that doesn't work, will try calling ``apply`` again with ``by_row="compat"``, + and if that fails, will call ``apply`` again with ``by_row=False`` + (backward compatible). + If False, the func will be passed the whole Series at once. + + ``by_row`` has no effect when ``func`` is a string. + + .. versionadded:: 2.1.0 + **kwargs + Additional keyword arguments passed to func. + + Returns + ------- + Series or DataFrame + If func returns a Series object, the result will be a DataFrame. + + See Also + -------- + Series.map : For element-wise operations. + Series.agg : Only perform aggregating type operations. + Series.transform : Only perform transforming type operations. + + Notes + ----- + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + Create a series with typical summer temperatures for each city. + + >>> s = pd.Series([20, 21, 12], + ... index=['London', 'New York', 'Helsinki']) + >>> s + London 20 + New York 21 + Helsinki 12 + dtype: int64 + + Square the values by defining a function and passing it as an + argument to ``apply()``. + + >>> def square(x): + ... return x ** 2 + >>> s.apply(square) + London 400 + New York 441 + Helsinki 144 + dtype: int64 + + Square the values by passing an anonymous function as an + argument to ``apply()``. + + >>> s.apply(lambda x: x ** 2) + London 400 + New York 441 + Helsinki 144 + dtype: int64 + + Define a custom function that needs additional positional + arguments and pass these additional arguments using the + ``args`` keyword. + + >>> def subtract_custom_value(x, custom_value): + ... return x - custom_value + + >>> s.apply(subtract_custom_value, args=(5,)) + London 15 + New York 16 + Helsinki 7 + dtype: int64 + + Define a custom function that takes keyword arguments + and pass these arguments to ``apply``. + + >>> def add_custom_values(x, **kwargs): + ... for month in kwargs: + ... x += kwargs[month] + ... return x + + >>> s.apply(add_custom_values, june=30, july=20, august=25) + London 95 + New York 96 + Helsinki 87 + dtype: int64 + + Use a function from the NumPy library. 
+ + >>> s.apply(np.log) + London 2.995732 + New York 3.044522 + Helsinki 2.484907 + dtype: float64 + """ + return SeriesApply( + self, + func, + convert_dtype=convert_dtype, + by_row=by_row, + args=args, + kwargs=kwargs, + ).apply() + + def _reindex_indexer( + self, + new_index: Index | None, + indexer: npt.NDArray[np.intp] | None, + copy: bool | None, + ) -> Series: + # Note: new_index is None iff indexer is None + # if not None, indexer is np.intp + if indexer is None and ( + new_index is None or new_index.names == self.index.names + ): + if using_copy_on_write(): + return self.copy(deep=copy) + if copy or copy is None: + return self.copy(deep=copy) + return self + + new_values = algorithms.take_nd( + self._values, indexer, allow_fill=True, fill_value=None + ) + return self._constructor(new_values, index=new_index, copy=False) + + def _needs_reindex_multi(self, axes, method, level) -> bool: + """ + Check if we do need a multi reindex; this is for compat with + higher dims. + """ + return False + + @overload + def rename( + self, + index: Renamer | Hashable | None = ..., + *, + axis: Axis | None = ..., + copy: bool = ..., + inplace: Literal[True], + level: Level | None = ..., + errors: IgnoreRaise = ..., + ) -> None: + ... + + @overload + def rename( + self, + index: Renamer | Hashable | None = ..., + *, + axis: Axis | None = ..., + copy: bool = ..., + inplace: Literal[False] = ..., + level: Level | None = ..., + errors: IgnoreRaise = ..., + ) -> Series: + ... + + @overload + def rename( + self, + index: Renamer | Hashable | None = ..., + *, + axis: Axis | None = ..., + copy: bool = ..., + inplace: bool = ..., + level: Level | None = ..., + errors: IgnoreRaise = ..., + ) -> Series | None: + ... + + def rename( + self, + index: Renamer | Hashable | None = None, + *, + axis: Axis | None = None, + copy: bool | None = None, + inplace: bool = False, + level: Level | None = None, + errors: IgnoreRaise = "ignore", + ) -> Series | None: + """ + Alter Series index labels or name. + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Extra labels listed don't throw an + error. + + Alternatively, change ``Series.name`` with a scalar value. + + See the :ref:`user guide ` for more. + + Parameters + ---------- + index : scalar, hashable sequence, dict-like or function optional + Functions or dict-like are transformations to apply to + the index. + Scalar or hashable sequence-like will alter the ``Series.name`` + attribute. + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + copy : bool, default True + Also copy underlying data. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + `__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + inplace : bool, default False + Whether to return a new Series. If True the value of copy is ignored. + level : int or level name, default None + In case of MultiIndex, only rename labels in the specified level. + errors : {'ignore', 'raise'}, default 'ignore' + If 'raise', raise `KeyError` when a `dict-like mapper` or + `index` contains labels that are not present in the index being transformed. 
+ If 'ignore', existing keys will be renamed and extra keys will be ignored. + + Returns + ------- + Series or None + Series with index labels or name altered or None if ``inplace=True``. + + See Also + -------- + DataFrame.rename : Corresponding DataFrame method. + Series.rename_axis : Set the name of the axis. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s + 0 1 + 1 2 + 2 3 + dtype: int64 + >>> s.rename("my_name") # scalar, changes Series.name + 0 1 + 1 2 + 2 3 + Name: my_name, dtype: int64 + >>> s.rename(lambda x: x ** 2) # function, changes labels + 0 1 + 1 2 + 4 3 + dtype: int64 + >>> s.rename({1: 3, 2: 5}) # mapping, changes labels + 0 1 + 3 2 + 5 3 + dtype: int64 + """ + if axis is not None: + # Make sure we raise if an invalid 'axis' is passed. + axis = self._get_axis_number(axis) + + if callable(index) or is_dict_like(index): + # error: Argument 1 to "_rename" of "NDFrame" has incompatible + # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], + # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, + # Hashable], Callable[[Any], Hashable], None]" + return super()._rename( + index, # type: ignore[arg-type] + copy=copy, + inplace=inplace, + level=level, + errors=errors, + ) + else: + return self._set_name(index, inplace=inplace, deep=copy) + + @Appender( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s + 0 1 + 1 2 + 2 3 + dtype: int64 + + >>> s.set_axis(['a', 'b', 'c'], axis=0) + a 1 + b 2 + c 3 + dtype: int64 + """ + ) + @Substitution( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + extended_summary_sub="", + axis_description_sub="", + see_also_sub="", + ) + @Appender(NDFrame.set_axis.__doc__) + def set_axis( + self, + labels, + *, + axis: Axis = 0, + copy: bool | None = None, + ) -> Series: + return super().set_axis(labels, axis=axis, copy=copy) + + # error: Cannot determine type of 'reindex' + @doc( + NDFrame.reindex, # type: ignore[has-type] + klass=_shared_doc_kwargs["klass"], + optional_reindex=_shared_doc_kwargs["optional_reindex"], + ) + def reindex( # type: ignore[override] + self, + index=None, + *, + axis: Axis | None = None, + method: ReindexMethod | None = None, + copy: bool | None = None, + level: Level | None = None, + fill_value: Scalar | None = None, + limit: int | None = None, + tolerance=None, + ) -> Series: + return super().reindex( + index=index, + method=method, + copy=copy, + level=level, + fill_value=fill_value, + limit=limit, + tolerance=tolerance, + ) + + @overload # type: ignore[override] + def rename_axis( + self, + mapper: IndexLabel | lib.NoDefault = ..., + *, + index=..., + axis: Axis = ..., + copy: bool = ..., + inplace: Literal[True], + ) -> None: + ... + + @overload + def rename_axis( + self, + mapper: IndexLabel | lib.NoDefault = ..., + *, + index=..., + axis: Axis = ..., + copy: bool = ..., + inplace: Literal[False] = ..., + ) -> Self: + ... + + @overload + def rename_axis( + self, + mapper: IndexLabel | lib.NoDefault = ..., + *, + index=..., + axis: Axis = ..., + copy: bool = ..., + inplace: bool = ..., + ) -> Self | None: + ... 
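A quick sketch of the ``errors`` behavior documented above (an illustrative doctest, not part of the pandas source; the exact KeyError message may differ between versions):

    >>> s = pd.Series([1, 2, 3])
    >>> s.rename({4: 9})  # label 4 is absent; silently ignored under the default errors='ignore'
    0    1
    1    2
    2    3
    dtype: int64
    >>> s.rename({4: 9}, errors='raise')  # the same mapping now raises
    Traceback (most recent call last):
    ...
    KeyError: ...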
+ + @doc(NDFrame.rename_axis) + def rename_axis( + self, + mapper: IndexLabel | lib.NoDefault = lib.no_default, + *, + index=lib.no_default, + axis: Axis = 0, + copy: bool = True, + inplace: bool = False, + ) -> Self | None: + return super().rename_axis( + mapper=mapper, + index=index, + axis=axis, + copy=copy, + inplace=inplace, + ) + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: Literal[True], + errors: IgnoreRaise = ..., + ) -> None: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: Literal[False] = ..., + errors: IgnoreRaise = ..., + ) -> Series: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: bool = ..., + errors: IgnoreRaise = ..., + ) -> Series | None: + ... + + def drop( + self, + labels: IndexLabel | None = None, + *, + axis: Axis = 0, + index: IndexLabel | None = None, + columns: IndexLabel | None = None, + level: Level | None = None, + inplace: bool = False, + errors: IgnoreRaise = "raise", + ) -> Series | None: + """ + Return Series with specified index labels removed. + + Remove elements of a Series based on specifying the index labels. + When using a multi-index, labels on different levels can be removed + by specifying the level. + + Parameters + ---------- + labels : single label or list-like + Index labels to drop. + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + index : single label or list-like + Redundant for application on Series, but 'index' can be used instead + of 'labels'. + columns : single label or list-like + No change is made to the Series; use 'index' or 'labels' instead. + level : int or level name, optional + For MultiIndex, level for which the labels will be removed. + inplace : bool, default False + If True, do operation inplace and return None. + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and only existing labels are dropped. + + Returns + ------- + Series or None + Series with specified index labels removed or None if ``inplace=True``. + + Raises + ------ + KeyError + If none of the labels are found in the index. + + See Also + -------- + Series.reindex : Return only specified index labels of Series. + Series.dropna : Return series without null values. + Series.drop_duplicates : Return Series with duplicate values removed. + DataFrame.drop : Drop specified labels from rows or columns. + + Examples + -------- + >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C']) + >>> s + A 0 + B 1 + C 2 + dtype: int64 + + Drop labels B and C + + >>> s.drop(labels=['B', 'C']) + A 0 + dtype: int64 + + Drop 2nd level label in MultiIndex Series + + >>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'], + ... ['speed', 'weight', 'length']], + ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], + ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) + >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], + ... 
index=midx) + >>> s + llama speed 45.0 + weight 200.0 + length 1.2 + cow speed 30.0 + weight 250.0 + length 1.5 + falcon speed 320.0 + weight 1.0 + length 0.3 + dtype: float64 + + >>> s.drop(labels='weight', level=1) + llama speed 45.0 + length 1.2 + cow speed 30.0 + length 1.5 + falcon speed 320.0 + length 0.3 + dtype: float64 + """ + return super().drop( + labels=labels, + axis=axis, + index=index, + columns=columns, + level=level, + inplace=inplace, + errors=errors, + ) + + def pop(self, item: Hashable) -> Any: + """ + Return item and drop it from the series. Raise KeyError if not found. + + Parameters + ---------- + item : label + Index of the element that needs to be removed. + + Returns + ------- + Value that is popped from the series. + + Examples + -------- + >>> ser = pd.Series([1, 2, 3]) + + >>> ser.pop(0) + 1 + + >>> ser + 1 2 + 2 3 + dtype: int64 + """ + return super().pop(item=item) + + @doc(INFO_DOCSTRING, **series_sub_kwargs) + def info( + self, + verbose: bool | None = None, + buf: IO[str] | None = None, + max_cols: int | None = None, + memory_usage: bool | str | None = None, + show_counts: bool = True, + ) -> None: + return SeriesInfo(self, memory_usage).render( + buf=buf, + max_cols=max_cols, + verbose=verbose, + show_counts=show_counts, + ) + + # TODO(3.0): this can be removed once GH#33302 deprecation is enforced + def _replace_single(self, to_replace, method: str, inplace: bool, limit): + """ + Replace values in a Series using the fill method specified when no + replacement value is given in the replace method. + """ + + result = self if inplace else self.copy() + + values = result._values + mask = missing.mask_missing(values, to_replace) + + if isinstance(values, ExtensionArray): + # dispatch to the EA's _pad_mask_inplace method + values._fill_mask_inplace(method, limit, mask) + else: + fill_f = missing.get_fill_func(method) + fill_f(values, limit=limit, mask=mask) + + if inplace: + return + return result + + def memory_usage(self, index: bool = True, deep: bool = False) -> int: + """ + Return the memory usage of the Series. + + The memory usage can optionally include the contribution of + the index and of elements of `object` dtype. + + Parameters + ---------- + index : bool, default True + Specifies whether to include the memory usage of the Series index. + deep : bool, default False + If True, introspect the data deeply by interrogating + `object` dtypes for system-level memory consumption, and include + it in the returned value. + + Returns + ------- + int + Bytes of memory consumed. + + See Also + -------- + numpy.ndarray.nbytes : Total bytes consumed by the elements of the + array. + DataFrame.memory_usage : Bytes consumed by a DataFrame. + + Examples + -------- + >>> s = pd.Series(range(3)) + >>> s.memory_usage() + 152 + + Not including the index gives the size of the rest of the data, which + is necessarily smaller: + + >>> s.memory_usage(index=False) + 24 + + The memory footprint of `object` values is ignored by default: + + >>> s = pd.Series(["a", "b"]) + >>> s.values + array(['a', 'b'], dtype=object) + >>> s.memory_usage() + 144 + >>> s.memory_usage(deep=True) + 244 + """ + v = self._memory_usage(deep=deep) + if index: + v += self.index.memory_usage(deep=deep) + return v + + def isin(self, values) -> Series: + """ + Whether elements in Series are contained in `values`. + + Return a boolean Series showing whether each element in the Series + matches an element in the passed sequence of `values` exactly. 
+ + Parameters + ---------- + values : set or list-like + The sequence of values to test. Passing in a single string will + raise a ``TypeError``. Instead, turn a single string into a + list of one element. + + Returns + ------- + Series + Series of booleans indicating if each element is in values. + + Raises + ------ + TypeError + * If `values` is a string + + See Also + -------- + DataFrame.isin : Equivalent method on DataFrame. + + Examples + -------- + >>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama', + ... 'hippo'], name='animal') + >>> s.isin(['cow', 'llama']) + 0 True + 1 True + 2 True + 3 False + 4 True + 5 False + Name: animal, dtype: bool + + To invert the boolean values, use the ``~`` operator: + + >>> ~s.isin(['cow', 'llama']) + 0 False + 1 False + 2 False + 3 True + 4 False + 5 True + Name: animal, dtype: bool + + Passing a single string as ``s.isin('llama')`` will raise an error. Use + a list of one element instead: + + >>> s.isin(['llama']) + 0 True + 1 False + 2 True + 3 False + 4 True + 5 False + Name: animal, dtype: bool + + Strings and integers are distinct and are therefore not comparable: + + >>> pd.Series([1]).isin(['1']) + 0 False + dtype: bool + >>> pd.Series([1.1]).isin(['1.1']) + 0 False + dtype: bool + """ + result = algorithms.isin(self._values, values) + return self._constructor(result, index=self.index, copy=False).__finalize__( + self, method="isin" + ) + + def between( + self, + left, + right, + inclusive: Literal["both", "neither", "left", "right"] = "both", + ) -> Series: + """ + Return boolean Series equivalent to left <= series <= right. + + This function returns a boolean vector containing `True` wherever the + corresponding Series element is between the boundary values `left` and + `right`. NA values are treated as `False`. + + Parameters + ---------- + left : scalar or list-like + Left boundary. + right : scalar or list-like + Right boundary. + inclusive : {"both", "neither", "left", "right"} + Include boundaries. Whether to set each bound as closed or open. + + .. versionchanged:: 1.3.0 + + Returns + ------- + Series + Series representing whether each element is between left and + right (inclusive). + + See Also + -------- + Series.gt : Greater than of series and other. + Series.lt : Less than of series and other. + + Notes + ----- + This function is equivalent to ``(left <= ser) & (ser <= right)`` + + Examples + -------- + >>> s = pd.Series([2, 0, 4, 8, np.nan]) + + Boundary values are included by default: + + >>> s.between(1, 4) + 0 True + 1 False + 2 True + 3 False + 4 False + dtype: bool + + With `inclusive` set to ``"neither"`` boundary values are excluded: + + >>> s.between(1, 4, inclusive="neither") + 0 True + 1 False + 2 False + 3 False + 4 False + dtype: bool + + `left` and `right` can be any scalar value: + + >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve']) + >>> s.between('Anna', 'Daniel') + 0 False + 1 True + 2 True + 3 False + dtype: bool + """ + if inclusive == "both": + lmask = self >= left + rmask = self <= right + elif inclusive == "left": + lmask = self >= left + rmask = self < right + elif inclusive == "right": + lmask = self > left + rmask = self <= right + elif inclusive == "neither": + lmask = self > left + rmask = self < right + else: + raise ValueError( + "Inclusive has to be either string of 'both'," + "'left', 'right', or 'neither'." 
+ ) + + return lmask & rmask + + def case_when( + self, + caselist: list[ + tuple[ + ArrayLike | Callable[[Series], Series | np.ndarray | Sequence[bool]], + ArrayLike | Scalar | Callable[[Series], Series | np.ndarray], + ], + ], + ) -> Series: + """ + Replace values where the conditions are True. + + Parameters + ---------- + caselist : A list of tuples of conditions and expected replacements + Takes the form: ``(condition0, replacement0)``, + ``(condition1, replacement1)``, ... . + ``condition`` should be a 1-D boolean array-like object + or a callable. If ``condition`` is a callable, + it is computed on the Series + and should return a boolean Series or array. + The callable must not change the input Series + (though pandas doesn't check it). ``replacement`` should be a + 1-D array-like object, a scalar or a callable. + If ``replacement`` is a callable, it is computed on the Series + and should return a scalar or Series. The callable + must not change the input Series + (though pandas doesn't check it). + + .. versionadded:: 2.2.0 + + Returns + ------- + Series + + See Also + -------- + Series.mask : Replace values where the condition is True. + + Examples + -------- + >>> c = pd.Series([6, 7, 8, 9], name='c') + >>> a = pd.Series([0, 0, 1, 2]) + >>> b = pd.Series([0, 3, 4, 5]) + + >>> c.case_when(caselist=[(a.gt(0), a), # condition, replacement + ... (b.gt(0), b)]) + 0 6 + 1 3 + 2 1 + 3 2 + Name: c, dtype: int64 + """ + if not isinstance(caselist, list): + raise TypeError( + f"The caselist argument should be a list; instead got {type(caselist)}" + ) + + if not caselist: + raise ValueError( + "provide at least one boolean condition, " + "with a corresponding replacement." + ) + + for num, entry in enumerate(caselist): + if not isinstance(entry, tuple): + raise TypeError( + f"Argument {num} must be a tuple; instead got {type(entry)}." + ) + if len(entry) != 2: + raise ValueError( + f"Argument {num} must have length 2; " + "a condition and replacement; " + f"instead got length {len(entry)}." + ) + caselist = [ + ( + com.apply_if_callable(condition, self), + com.apply_if_callable(replacement, self), + ) + for condition, replacement in caselist + ] + default = self.copy() + conditions, replacements = zip(*caselist) + common_dtypes = [infer_dtype_from(arg)[0] for arg in [*replacements, default]] + if len(set(common_dtypes)) > 1: + common_dtype = find_common_type(common_dtypes) + updated_replacements = [] + for condition, replacement in zip(conditions, replacements): + if is_scalar(replacement): + replacement = construct_1d_arraylike_from_scalar( + value=replacement, length=len(condition), dtype=common_dtype + ) + elif isinstance(replacement, ABCSeries): + replacement = replacement.astype(common_dtype) + else: + replacement = pd_array(replacement, dtype=common_dtype) + updated_replacements.append(replacement) + replacements = updated_replacements + default = default.astype(common_dtype) + + counter = reversed(range(len(conditions))) + for position, condition, replacement in zip( + counter, conditions[::-1], replacements[::-1] + ): + try: + default = default.mask( + condition, other=replacement, axis=0, inplace=False, level=None + ) + except Exception as error: + raise ValueError( + f"Failed to apply condition{position} and replacement{position}." 
+ ) from error + return default + + # error: Cannot determine type of 'isna' + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def isna(self) -> Series: + return NDFrame.isna(self) + + # error: Cannot determine type of 'isna' + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def isnull(self) -> Series: + """ + Series.isnull is an alias for Series.isna. + """ + return super().isnull() + + # error: Cannot determine type of 'notna' + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def notna(self) -> Series: + return super().notna() + + # error: Cannot determine type of 'notna' + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def notnull(self) -> Series: + """ + Series.notnull is an alias for Series.notna. + """ + return super().notnull() + + @overload + def dropna( + self, + *, + axis: Axis = ..., + inplace: Literal[False] = ..., + how: AnyAll | None = ..., + ignore_index: bool = ..., + ) -> Series: + ... + + @overload + def dropna( + self, + *, + axis: Axis = ..., + inplace: Literal[True], + how: AnyAll | None = ..., + ignore_index: bool = ..., + ) -> None: + ... + + def dropna( + self, + *, + axis: Axis = 0, + inplace: bool = False, + how: AnyAll | None = None, + ignore_index: bool = False, + ) -> Series | None: + """ + Return a new Series with missing values removed. + + See the :ref:`User Guide ` for more on which values are + considered missing, and how to work with missing data. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + inplace : bool, default False + If True, do operation inplace and return None. + how : str, optional + Not in use. Kept for compatibility. + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + .. versionadded:: 2.0.0 + + Returns + ------- + Series or None + Series with NA entries dropped from it or None if ``inplace=True``. + + See Also + -------- + Series.isna: Indicate missing values. + Series.notna : Indicate existing (non-missing) values. + Series.fillna : Replace missing values. + DataFrame.dropna : Drop rows or columns which contain NA values. + Index.dropna : Drop missing indices. + + Examples + -------- + >>> ser = pd.Series([1., 2., np.nan]) + >>> ser + 0 1.0 + 1 2.0 + 2 NaN + dtype: float64 + + Drop NA values from a Series. + + >>> ser.dropna() + 0 1.0 + 1 2.0 + dtype: float64 + + Empty strings are not considered NA values. ``None`` is considered an + NA value. 
+ + >>> ser = pd.Series([np.nan, 2, pd.NaT, '', None, 'I stay']) + >>> ser + 0 NaN + 1 2 + 2 NaT + 3 + 4 None + 5 I stay + dtype: object + >>> ser.dropna() + 1 2 + 3 + 5 I stay + dtype: object + """ + inplace = validate_bool_kwarg(inplace, "inplace") + ignore_index = validate_bool_kwarg(ignore_index, "ignore_index") + # Validate the axis parameter + self._get_axis_number(axis or 0) + + if self._can_hold_na: + result = remove_na_arraylike(self) + else: + if not inplace: + result = self.copy(deep=None) + else: + result = self + + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + return self._update_inplace(result) + else: + return result + + # ---------------------------------------------------------------------- + # Time series-oriented methods + + def to_timestamp( + self, + freq: Frequency | None = None, + how: Literal["s", "e", "start", "end"] = "start", + copy: bool | None = None, + ) -> Series: + """ + Cast to DatetimeIndex of Timestamps, at *beginning* of period. + + Parameters + ---------- + freq : str, default frequency of PeriodIndex + Desired frequency. + how : {'s', 'e', 'start', 'end'} + Convention for converting period to timestamp; start of period + vs. end. + copy : bool, default True + Whether or not to return a copy. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + `__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + + Returns + ------- + Series with DatetimeIndex + + Examples + -------- + >>> idx = pd.PeriodIndex(['2023', '2024', '2025'], freq='Y') + >>> s1 = pd.Series([1, 2, 3], index=idx) + >>> s1 + 2023 1 + 2024 2 + 2025 3 + Freq: Y-DEC, dtype: int64 + + The resulting frequency of the Timestamps is `YearBegin` + + >>> s1 = s1.to_timestamp() + >>> s1 + 2023-01-01 1 + 2024-01-01 2 + 2025-01-01 3 + Freq: YS-JAN, dtype: int64 + + Using `freq` which is the offset that the Timestamps will have + + >>> s2 = pd.Series([1, 2, 3], index=idx) + >>> s2 = s2.to_timestamp(freq='M') + >>> s2 + 2023-01-31 1 + 2024-01-31 2 + 2025-01-31 3 + Freq: YE-JAN, dtype: int64 + """ + if not isinstance(self.index, PeriodIndex): + raise TypeError(f"unsupported Type {type(self.index).__name__}") + + new_obj = self.copy(deep=copy and not using_copy_on_write()) + new_index = self.index.to_timestamp(freq=freq, how=how) + setattr(new_obj, "index", new_index) + return new_obj + + def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series: + """ + Convert Series from DatetimeIndex to PeriodIndex. + + Parameters + ---------- + freq : str, default None + Frequency associated with the PeriodIndex. + copy : bool, default True + Whether or not to return a copy. + + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + `__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + + Returns + ------- + Series + Series with index converted to PeriodIndex. 
+ + Examples + -------- + >>> idx = pd.DatetimeIndex(['2023', '2024', '2025']) + >>> s = pd.Series([1, 2, 3], index=idx) + >>> s = s.to_period() + >>> s + 2023 1 + 2024 2 + 2025 3 + Freq: Y-DEC, dtype: int64 + + Viewing the index + + >>> s.index + PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]') + """ + if not isinstance(self.index, DatetimeIndex): + raise TypeError(f"unsupported Type {type(self.index).__name__}") + + new_obj = self.copy(deep=copy and not using_copy_on_write()) + new_index = self.index.to_period(freq=freq) + setattr(new_obj, "index", new_index) + return new_obj + + # ---------------------------------------------------------------------- + # Add index + _AXIS_ORDERS: list[Literal["index", "columns"]] = ["index"] + _AXIS_LEN = len(_AXIS_ORDERS) + _info_axis_number: Literal[0] = 0 + _info_axis_name: Literal["index"] = "index" + + index = properties.AxisProperty( + axis=0, + doc=""" + The index (axis labels) of the Series. + + The index of a Series is used to label and identify each element of the + underlying data. The index can be thought of as an immutable ordered set + (technically a multi-set, as it may contain duplicate labels), and is + used to index and align data in pandas. + + Returns + ------- + Index + The index labels of the Series. + + See Also + -------- + Series.reindex : Conform Series to new index. + Index : The base pandas index type. + + Notes + ----- + For more information on pandas indexing, see the `indexing user guide + `__. + + Examples + -------- + To create a Series with a custom index and view the index labels: + + >>> cities = ['Kolkata', 'Chicago', 'Toronto', 'Lisbon'] + >>> populations = [14.85, 2.71, 2.93, 0.51] + >>> city_series = pd.Series(populations, index=cities) + >>> city_series.index + Index(['Kolkata', 'Chicago', 'Toronto', 'Lisbon'], dtype='object') + + To change the index labels of an existing Series: + + >>> city_series.index = ['KOL', 'CHI', 'TOR', 'LIS'] + >>> city_series.index + Index(['KOL', 'CHI', 'TOR', 'LIS'], dtype='object') + """, + ) + + # ---------------------------------------------------------------------- + # Accessor Methods + # ---------------------------------------------------------------------- + str = CachedAccessor("str", StringMethods) + dt = CachedAccessor("dt", CombinedDatetimelikeProperties) + cat = CachedAccessor("cat", CategoricalAccessor) + plot = CachedAccessor("plot", pandas.plotting.PlotAccessor) + sparse = CachedAccessor("sparse", SparseAccessor) + struct = CachedAccessor("struct", StructAccessor) + list = CachedAccessor("list", ListAccessor) + + # ---------------------------------------------------------------------- + # Add plotting methods to Series + hist = pandas.plotting.hist_series + + # ---------------------------------------------------------------------- + # Template-Based Arithmetic/Comparison Methods + + def _cmp_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + + if isinstance(other, Series) and not self._indexed_same(other): + raise ValueError("Can only compare identically-labeled Series objects") + + lvalues = self._values + rvalues = extract_array(other, extract_numpy=True, extract_range=True) + + res_values = ops.comparison_op(lvalues, rvalues, op) + + return self._construct_result(res_values, name=res_name) + + def _logical_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + self, other = self._align_for_op(other, align_asobject=True) + + lvalues = self._values + rvalues = extract_array(other, extract_numpy=True, 
extract_range=True) + + res_values = ops.logical_op(lvalues, rvalues, op) + return self._construct_result(res_values, name=res_name) + + def _arith_method(self, other, op): + self, other = self._align_for_op(other) + return base.IndexOpsMixin._arith_method(self, other, op) + + def _align_for_op(self, right, align_asobject: bool = False): + """align lhs and rhs Series""" + # TODO: Different from DataFrame._align_for_op, list, tuple and ndarray + # are not coerced here + # because Series has inconsistencies described in GH#13637 + left = self + + if isinstance(right, Series): + # avoid repeated alignment + if not left.index.equals(right.index): + if align_asobject: + if left.dtype not in (object, np.bool_) or right.dtype not in ( + object, + np.bool_, + ): + warnings.warn( + "Operation between non boolean Series with different " + "indexes will no longer return a boolean result in " + "a future version. Cast both Series to object type " + "to maintain the prior behavior.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # to keep original value's dtype for bool ops + left = left.astype(object) + right = right.astype(object) + + left, right = left.align(right, copy=False) + + return left, right + + def _binop(self, other: Series, func, level=None, fill_value=None) -> Series: + """ + Perform generic binary operation with optional fill value. + + Parameters + ---------- + other : Series + func : binary operator + fill_value : float or object + Value to substitute for NA/null values. If both Series are NA in a + location, the result will be NA regardless of the passed fill value. + level : int or level name, default None + Broadcast across a level, matching Index values on the + passed MultiIndex level. + + Returns + ------- + Series + """ + this = self + + if not self.index.equals(other.index): + this, other = self.align(other, level=level, join="outer", copy=False) + + this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) + + with np.errstate(all="ignore"): + result = func(this_vals, other_vals) + + name = ops.get_op_result_name(self, other) + out = this._construct_result(result, name) + return cast(Series, out) + + def _construct_result( + self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable + ) -> Series | tuple[Series, Series]: + """ + Construct an appropriately-labelled Series from the result of an op. + + Parameters + ---------- + result : ndarray or ExtensionArray + name : Label + + Returns + ------- + Series + In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. 
+ """ + if isinstance(result, tuple): + # produced by divmod or rdivmod + + res1 = self._construct_result(result[0], name=name) + res2 = self._construct_result(result[1], name=name) + + # GH#33427 assertions to keep mypy happy + assert isinstance(res1, Series) + assert isinstance(res2, Series) + return (res1, res2) + + # TODO: result should always be ArrayLike, but this fails for some + # JSONArray tests + dtype = getattr(result, "dtype", None) + out = self._constructor(result, index=self.index, dtype=dtype, copy=False) + out = out.__finalize__(self) + + # Set the result's name after __finalize__ is called because __finalize__ + # would set it back to self.name + out.name = name + return out + + def _flex_method(self, other, op, *, level=None, fill_value=None, axis: Axis = 0): + if axis is not None: + self._get_axis_number(axis) + + res_name = ops.get_op_result_name(self, other) + + if isinstance(other, Series): + return self._binop(other, op, level=level, fill_value=fill_value) + elif isinstance(other, (np.ndarray, list, tuple)): + if len(other) != len(self): + raise ValueError("Lengths must be equal") + other = self._constructor(other, self.index, copy=False) + result = self._binop(other, op, level=level, fill_value=fill_value) + result._name = res_name + return result + else: + if fill_value is not None: + if isna(other): + return op(self, fill_value) + self = self.fillna(fill_value) + + return op(self, other) + + @Appender(ops.make_flex_doc("eq", "series")) + def eq( + self, + other, + level: Level | None = None, + fill_value: float | None = None, + axis: Axis = 0, + ) -> Series: + return self._flex_method( + other, operator.eq, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("ne", "series")) + def ne(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.ne, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("le", "series")) + def le(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.le, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("lt", "series")) + def lt(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.lt, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("ge", "series")) + def ge(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.ge, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("gt", "series")) + def gt(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.gt, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("add", "series")) + def add(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.add, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("radd", "series")) + def radd(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.radd, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("sub", "series")) + def sub(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.sub, level=level, fill_value=fill_value, axis=axis + ) + + subtract = 
sub + + @Appender(ops.make_flex_doc("rsub", "series")) + def rsub(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.rsub, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mul", "series")) + def mul( + self, + other, + level: Level | None = None, + fill_value: float | None = None, + axis: Axis = 0, + ) -> Series: + return self._flex_method( + other, operator.mul, level=level, fill_value=fill_value, axis=axis + ) + + multiply = mul + + @Appender(ops.make_flex_doc("rmul", "series")) + def rmul(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.rmul, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("truediv", "series")) + def truediv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.truediv, level=level, fill_value=fill_value, axis=axis + ) + + div = truediv + divide = truediv + + @Appender(ops.make_flex_doc("rtruediv", "series")) + def rtruediv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.rtruediv, level=level, fill_value=fill_value, axis=axis + ) + + rdiv = rtruediv + + @Appender(ops.make_flex_doc("floordiv", "series")) + def floordiv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.floordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rfloordiv", "series")) + def rfloordiv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.rfloordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mod", "series")) + def mod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.mod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rmod", "series")) + def rmod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.rmod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("pow", "series")) + def pow(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, operator.pow, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rpow", "series")) + def rpow(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.rpow, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("divmod", "series")) + def divmod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, divmod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rdivmod", "series")) + def rdivmod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: + return self._flex_method( + other, roperator.rdivmod, level=level, fill_value=fill_value, axis=axis + ) + + # ---------------------------------------------------------------------- + # Reductions + + def _reduce( + self, + op, + # error: Variable "pandas.core.series.Series.str" is not valid as a type + name: str, # type: ignore[valid-type] + *, + axis: Axis = 0, + skipna: bool = True, + numeric_only: bool = False, + filter_type=None, + **kwds, + 
): + """ + Perform a reduction operation. + + If we have an ndarray as a value, then simply perform the operation, + otherwise delegate to the object. + """ + delegate = self._values + + if axis is not None: + self._get_axis_number(axis) + + if isinstance(delegate, ExtensionArray): + # dispatch to ExtensionArray interface + return delegate._reduce(name, skipna=skipna, **kwds) + + else: + # dispatch to numpy arrays + if numeric_only and self.dtype.kind not in "iufcb": + # i.e. not is_numeric_dtype(self.dtype) + kwd_name = "numeric_only" + if name in ["any", "all"]: + kwd_name = "bool_only" + # GH#47500 - change to TypeError to match other methods + raise TypeError( + f"Series.{name} does not allow {kwd_name}={numeric_only} " + "with non-numeric dtypes." + ) + return op(delegate, skipna=skipna, **kwds) + + @Appender(make_doc("any", ndim=1)) + # error: Signature of "any" incompatible with supertype "NDFrame" + def any( # type: ignore[override] + self, + *, + axis: Axis = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> bool: + nv.validate_logical_func((), kwargs, fname="any") + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + return self._reduce( + nanops.nanany, + name="any", + axis=axis, + numeric_only=bool_only, + skipna=skipna, + filter_type="bool", + ) + + @Appender(make_doc("all", ndim=1)) + def all( + self, + axis: Axis = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> bool: + nv.validate_logical_func((), kwargs, fname="all") + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + return self._reduce( + nanops.nanall, + name="all", + axis=axis, + numeric_only=bool_only, + skipna=skipna, + filter_type="bool", + ) + + @doc(make_doc("min", ndim=1)) + def min( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("max", ndim=1)) + def max( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("sum", ndim=1)) + def sum( + self, + axis: Axis | None = None, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) + + @doc(make_doc("prod", ndim=1)) + def prod( + self, + axis: Axis | None = None, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) + + @doc(make_doc("mean", ndim=1)) + def mean( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("median", ndim=1)) + def median( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("sem", ndim=1)) + def sem( + self, + axis: Axis | None = None, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) + + @doc(make_doc("var", ndim=1)) + def var( + self, + axis: Axis | None = None, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) + + 
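The ``numeric_only`` guard in ``_reduce`` above is what users hit when requesting a numeric-only reduction on a non-numeric Series; a minimal sketch of the observable behavior (the message is the one built in the code above):

    >>> import pandas as pd
    >>> s = pd.Series(['a', 'b'])
    >>> s.sum(numeric_only=True)
    Traceback (most recent call last):
    ...
    TypeError: Series.sum does not allow numeric_only=True with non-numeric dtypes.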
@doc(make_doc("std", ndim=1)) + def std( + self, + axis: Axis | None = None, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) + + @doc(make_doc("skew", ndim=1)) + def skew( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("kurt", ndim=1)) + def kurt( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) + + kurtosis = kurt + product = prod + + @doc(make_doc("cummin", ndim=1)) + def cummin(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummin(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cummax", ndim=1)) + def cummax(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummax(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumsum", ndim=1)) + def cumsum(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumprod", 1)) + def cumprod(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/blend_square.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/blend_square.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ef2fd797bde0bd1ab6e753d1481da243e1b0ea --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/blend_square.py @@ -0,0 +1,37 @@ +'''OpenGL extension NV.blend_square + +This module customises the behaviour of the +OpenGL.raw.GL.NV.blend_square to provide a more +Python-friendly API + +Overview (from the spec) + + It is useful to be able to multiply a number by itself in the blending + stages -- for example, in certain types of specular lighting effects + where a result from a dot product needs to be taken to a high power. + + This extension provides four additional blending factors to permit + this and other effects: SRC_COLOR and ONE_MINUS_SRC_COLOR for source + blending factors, and DST_COLOR and ONE_MINUS_DST_COLOR for destination + blending factors. + + Direct3D provides capability bits for advertising these additional + blend modes. 
+ +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/blend_square.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.blend_square import * +from OpenGL.raw.GL.NV.blend_square import _EXTENSION_NAME + +def glInitBlendSquareNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/deep_texture3D.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/deep_texture3D.py new file mode 100644 index 0000000000000000000000000000000000000000..b9ebb51cee35bf34931f1aa3e9c4edc546b6802f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/deep_texture3D.py @@ -0,0 +1,32 @@ +'''OpenGL extension NV.deep_texture3D + +This module customises the behaviour of the +OpenGL.raw.GL.NV.deep_texture3D to provide a more +Python-friendly API + +Overview (from the spec) + + Some applications require 3D textures that have a significant number of + slices, but less resolution in width and height. In the current spec, the + maximum value for the size of all three dimensions is specified by a + single value. This extension adds a second set of limits against which 3D + textures can be checked if an application needs deeper textures than would + be allowed by the symmetric texture limits. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/deep_texture3D.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.deep_texture3D import * +from OpenGL.raw.GL.NV.deep_texture3D import _EXTENSION_NAME + +def glInitDeepTexture3DNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/depth_buffer_float.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/depth_buffer_float.py new file mode 100644 index 0000000000000000000000000000000000000000..4c45b1e2c0593f09ccf7811fc47debd29bad1650 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/depth_buffer_float.py @@ -0,0 +1,48 @@ +'''OpenGL extension NV.depth_buffer_float + +This module customises the behaviour of the +OpenGL.raw.GL.NV.depth_buffer_float to provide a more +Python-friendly API + +Overview (from the spec) + + This extension provides new texture internal formats whose depth + components are stored as 32-bit floating-point values, rather than the + normalized unsigned integers used in existing depth formats. + Floating-point depth textures support all the functionality supported for + fixed-point depth textures, including shadow mapping and rendering support + via EXT_framebuffer_object. Floating-point depth textures can store + values outside the range [0,1]. + + By default, OpenGL entry points taking depth values implicitly clamp the + values to the range [0,1]. This extension provides new DepthClear, + DepthRange, and DepthBoundsEXT entry points that allow applications to + specify depth values that are not clamped. 
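Assuming the extension is available and PyOpenGL exposes the NV entry points named in the spec (``glClearDepthdNV``, ``glDepthRangedNV``; names inferred from the spec, not verified against every PyOpenGL version), unclamped depth values could be supplied roughly like this:

    from OpenGL.GL.NV.depth_buffer_float import (
        glInitDepthBufferFloatNV, glClearDepthdNV, glDepthRangedNV,
    )

    if glInitDepthBufferFloatNV():
        glDepthRangedNV(0.0, 2.0)  # depth range no longer clamped to [0, 1]
        glClearDepthdNV(1.5)       # clear value outside [0, 1]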
+ + Additionally, this extension provides new packed depth/stencil pixel + formats (see EXT_packed_depth_stencil) that have 64-bit pixels consisting + of a 32-bit floating-point depth value, 8 bits of stencil, and 24 unused + bits. A packed depth/stencil texture internal format is also provided. + + This extension does not provide support for WGL or GLX pixel formats with + floating-point depth buffers. The existing (but not commonly used) + WGL_EXT_depth_float extension could be used for this purpose. + + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/depth_buffer_float.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.depth_buffer_float import * +from OpenGL.raw.GL.NV.depth_buffer_float import _EXTENSION_NAME + +def glInitDepthBufferFloatNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/evaluators.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/evaluators.py new file mode 100644 index 0000000000000000000000000000000000000000..15a3ed93b50e0080fcd69f78c798a49279e77d64 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/evaluators.py @@ -0,0 +1,103 @@ +'''OpenGL extension NV.evaluators + +This module customises the behaviour of the +OpenGL.raw.GL.NV.evaluators to provide a more +Python-friendly API + +Overview (from the spec) + + OpenGL evaluators provide applications with the capability to + specify polynomial or rational curves and surfaces using control + points relative to the Bezier basis. The curves and surfaces are + then drawn by evaluating the polynomials provided at various values + for the u parameter of a curve or the (u,v) parameters of a surface. + A tensor product formulation is used for the surfaces. + + For various historical reasons, evaluators have not been + particularly popular as an interface for drawing curves and surfaces. + This extension proposes a new interface for surfaces that provides a + number of significant enhancements to the functionality provided by + the original OpenGL evaluators. + + Many implementations never optimized evaluators, so applications + often implemented their own algorithms instead. This extension + relaxes some restrictions that make it difficult to optimize + evaluators. + + Also, new vertex attributes have been added to OpenGL through + extensions, including multiple sets of texture coordinates, a + secondary color, a fog coordinate, a vertex weight, and others. + The extensions which added these vertex attributes never bothered + to update the functionality of evaluators, since they were used so + little in the first place. In turn, evaluators have become more and + more out of date, making it even less likely that developers will + want to use them. Most of the attributes are not a big loss, but + support for multiple sets of texture coordinates would be absolutely + essential to developers considering the use of evaluators. + + OpenGL evaluators only support rectangular patches, not triangular + patches. Although triangular patches can be converted into + rectangular patches, direct support for triangular patches is likely + to be more efficient. 
+ + The tessellation algorithm used is too inflexible for most purposes; + only the number of rows and columns can be specified. Adjacent + patches must then have identical numbers of rows and columns, or + severe cracking will occur. Ideally, a number of subdivisions could + be specified for all four sides of a rectangular patch and for all + three of a triangular patch. This extension goes one step further + and allows those numbers to be specified in floating-point, providing + a mechanism for smoothly changing the level of detail of the surface. + + Meshes evaluated with EvalMesh are required to match up exactly + with equivalent meshes evaluated with EvalCoord or EvalPoint. + This makes it difficult or impossible to use optimizations such as + forward differencing. + + Finally, little attention is given to some of the difficult problems + that can arise when multiple patches are drawn. Depending on the + way evaluators are implemented, and depending on the orientation of + edges, numerical accuracy problems can cause cracks to appear between + patches with the same boundary control points. This extension makes + guarantees that an edge shared between two patches will match up + exactly under certain conditions. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/evaluators.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.evaluators import * +from OpenGL.raw.GL.NV.evaluators import _EXTENSION_NAME + +def glInitEvaluatorsNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +# INPUT glMapControlPointsNV.points size not checked against 'target,uorder,vorder' +glMapControlPointsNV=wrapper.wrapper(glMapControlPointsNV).setInputArraySize( + 'points', None +) +# INPUT glMapParameterivNV.params size not checked against 'target,pname' +glMapParameterivNV=wrapper.wrapper(glMapParameterivNV).setInputArraySize( + 'params', None +) +# INPUT glMapParameterfvNV.params size not checked against 'target,pname' +glMapParameterfvNV=wrapper.wrapper(glMapParameterfvNV).setInputArraySize( + 'params', None +) +glGetMapControlPointsNV=wrapper.wrapper(glGetMapControlPointsNV).setOutput( + 'points',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True +) +# OUTPUT glGetMapParameterivNV.params COMPSIZE(target,pname) +# OUTPUT glGetMapParameterfvNV.params COMPSIZE(target,pname) +glGetMapAttribParameterivNV=wrapper.wrapper(glGetMapAttribParameterivNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetMapAttribParameterfvNV=wrapper.wrapper(glGetMapAttribParameterfvNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/float_buffer.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/float_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..9f487d7c40309d21bf6b5276896518d033ca0a2c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/float_buffer.py @@ -0,0 +1,91 @@ +'''OpenGL extension NV.float_buffer + +This module customises the behaviour of the +OpenGL.raw.GL.NV.float_buffer to provide a more +Python-friendly API + +Overview (from the spec) + + This extension builds upon 
NV_fragment_program to provide a framebuffer + and texture format that allows fragment programs to read and write + unconstrained floating point data. + + In unextended OpenGL, most computations dealing with color or depth + buffers are typically constrained to operate on values in the range [0,1]. + Computational results are also typically clamped to the range [0,1]. + Color, texture, and depth buffers themselves also hold values mapped to + the range [0,1]. + + The NV_fragment_program extension provides a general computational model + that supports floating-point numbers constrained only by the precision of + the underlying data types. The quantities computed by fragment programs do + not necessarily correspond in number or in range to conventional + attributes such as RGBA colors or depth values. Because of the range and + precision constraints imposed by conventional fixed-point color buffers, + it may be difficult (if not impossible) to use them to implement certain + multi-pass algorithms. + + To enhance the extended range and precision available through fragment + programs, this extension provides floating-point RGBA color buffers that + can be used instead of conventional fixed-point RGBA color buffers. A + floating-point RGBA color buffer consists of one to four floating-point + components stored in the 16- or 32-bit floating-point formats (fp16 or + fp32) defined in the NV_half_float and NV_fragment_program extensions. + + When a floating-point color buffer is used, the results of fragment + programs, as written to the "x", "y", "z", and "w" components of the + o[COLR] or o[COLH] output registers, are written directly to the color + buffer without any clamping or modification. Certain per-fragment + operations are bypassed when rendering to floating-point color buffers. + + A floating-point color buffer can also be used as a texture map, either by + reading back the contents and then using conventional TexImage calls, or + by using the buffer directly via the ARB_render_texture extension or + the EXT_framebuffer_object extension. + + This extension has many uses. Some possible uses include: + + (1) Multi-pass algorithms with arbitrary intermediate results that + don't have to be artificially forced into the range [0,1]. In + addition, intermediate results can be written without having to + worry about out-of-range values. + + (2) Deferred shading algorithms where an expensive fragment program is + executed only after depth testing is fully complete. Instead, a + simple program is executed, which stores the parameters necessary + to produce a final result. After the entire scene is rendered, a + second pass is executed over the entire frame buffer to execute + the complex fragment program using the results written to the + floating-point color buffer in the first pass. This will save the + cost of applying complex fragment programs to fragments that will + not appear in the final image. + + (3) Use floating-point texture maps to evaluate functions with + arbitrary ranges. Arbitrary functions with a finite domain can be + approximated using a texture map holding sample results and + piecewise linear approximation. + + There are several significant limitations on the use of floating-point + color buffers. First, floating-point color buffers do not support frame + buffer blending. Second, floating-point texture maps do not support + mipmapping or any texture filtering other than NEAREST. Third, + floating-point texture maps must be 2D, and must use the + NV_texture_rectangle extension.
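Given the limitations just listed (no blending, NEAREST-only filtering), a renderer would typically feature-gate its float render targets. A minimal sketch, assuming a current GL context and using only names exported by this module and core OpenGL:

    from OpenGL.GL import GL_RGBA8
    from OpenGL.GL.NV.float_buffer import glInitFloatBufferNV, GL_FLOAT_RGBA32_NV

    def pick_color_format():
        # Prefer the fp32 RGBA format when NV_float_buffer is present;
        # otherwise fall back to a conventional fixed-point format.
        if glInitFloatBufferNV():
            return GL_FLOAT_RGBA32_NV
        return GL_RGBA8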
+ +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/float_buffer.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.float_buffer import * +from OpenGL.raw.GL.NV.float_buffer import _EXTENSION_NAME + +def glInitFloatBufferNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/fog_distance.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/fog_distance.py new file mode 100644 index 0000000000000000000000000000000000000000..becab6c5316211bc566c34ccaf827b4f780aa703 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/fog_distance.py @@ -0,0 +1,56 @@ +'''OpenGL extension NV.fog_distance + +This module customises the behaviour of the +OpenGL.raw.GL.NV.fog_distance to provide a more +Python-friendly API + +Overview (from the spec) + + Ideally, the fog distance (used to compute the fog factor as + described in Section 3.10) should be computed as the per-fragment + Euclidean distance to the fragment center from the eye. In practice, + implementations "may choose to approximate the eye-coordinate + distance from the eye to each fragment center by abs(ze). Further, + [the fog factor] f need not be computed at each fragment, but may + be computed at each vertex and interpolated as other data are." + + This extension provides the application with specific control over how + OpenGL computes the distance used in computing the fog factor. + + The extension supports three fog distance modes: "eye plane absolute", + where the fog distance is the absolute planar distance from the eye + plane (i.e., OpenGL's standard implementation allowance as cited above); + "eye plane", where the fog distance is the signed planar distance + from the eye plane; and "eye radial", where the fog distance is + computed as a Euclidean distance. In the case of the eye radial + fog distance mode, the distance may be computed per-vertex and then + interpolated per-fragment. + + The intent of this extension is to provide applications with better + control over the tradeoff between performance and fog quality. + The "eye planar" modes (signed or absolute) are straightforward + to implement with good performance, but scenes are consistently + under-fogged at the edges of the field of view. The "eye radial" + mode can provide for more accurate fog at the edges of the field of + view, but this assumes that either the eye radial fog distance is + computed per-fragment, or if the fog distance is computed per-vertex + and then interpolated per-fragment, then the scene must be + sufficiently tessellated.
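The under-fogging the overview describes is a simple geometric fact: for fragments away from the view axis, abs(ze) underestimates the true eye distance. A small numeric check in plain Python (the fragment position is an arbitrary example):

    import math

    x, y, z = 8.0, 0.0, -10.0                # a fragment well off the view axis
    planar = abs(z)                          # "eye plane absolute" mode
    radial = math.sqrt(x*x + y*y + z*z)      # "eye radial" mode

    print(planar, radial)   # 10.0 vs ~12.8: the planar mode under-fogs here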
+ +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/fog_distance.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.fog_distance import * +from OpenGL.raw.GL.NV.fog_distance import _EXTENSION_NAME + +def glInitFogDistanceNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/fragment_program_option.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/fragment_program_option.py new file mode 100644 index 0000000000000000000000000000000000000000..8c8b308e0004e9c1c240719c5ed56c1804739ea5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/fragment_program_option.py @@ -0,0 +1,56 @@ +'''OpenGL extension NV.fragment_program_option + +This module customises the behaviour of the +OpenGL.raw.GL.NV.fragment_program_option to provide a more +Python-friendly API + +Overview (from the spec) + + This extension provides additional fragment program functionality + to extend the standard ARB_fragment_program language and execution + environment. ARB programs wishing to use this added functionality + need only add: + + OPTION NV_fragment_program; + + to the beginning of their fragment programs. + + The functionality provided by this extension, which is roughly + equivalent to that provided by the NV_fragment_program extension, + includes: + + * increased control over precision in arithmetic computations and + storage, + + * data-dependent conditional writemasks, + + * an absolute value operator on scalar and swizzled operand loads, + + * instructions to compute partial derivatives, and perform texture + lookups using specified partial derivatives, + + * fully orthogonal "set on" instructions, + + * instructions to compute reflection vector and perform a 2D + coordinate transform, and + + * instructions to pack and unpack multiple quantities into a single + component. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/fragment_program_option.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.fragment_program_option import * +from OpenGL.raw.GL.NV.fragment_program_option import _EXTENSION_NAME + +def glInitFragmentProgramOptionNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/geometry_program4.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/geometry_program4.py new file mode 100644 index 0000000000000000000000000000000000000000..481e6a5070fcd4c7fe2495dceaf7803d17c63397 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/geometry_program4.py @@ -0,0 +1,67 @@ +'''OpenGL extension NV.geometry_program4 + +This module customises the behaviour of the +OpenGL.raw.GL.NV.geometry_program4 to provide a more +Python-friendly API + +Overview (from the spec) + + NV_geometry_program4 defines a new type of program available to be run on + the GPU, called a geometry program. 
Geometry programs are run on full + primitives after vertices are transformed, but prior to flat shading and + clipping. + + A geometry program begins with a single primitive - a point, line, or + triangle. Quads and polygons are allowed, but are decomposed into + individual triangles prior to geometry program execution. It can read the + attributes of any of the vertices in the primitive and use them to generate + new primitives. A geometry program has a fixed output primitive type, + either a point, a line strip, or a triangle strip. It emits vertices + (using the EMIT opcode) to define the output primitive. The attributes of + emitted vertices are specified by writing to the same set of result + bindings (e.g., "result.position") provided for vertex programs. + Additionally, a geometry program can emit multiple disconnected primitives + by using the ENDPRIM opcode, which is roughly equivalent to calling End + and then Begin again. The primitives emitted by the geometry program are + then clipped and then processed like an equivalent OpenGL primitive + specified by the application. + + This extension provides four additional primitive types: lines with + adjacency, line strips with adjacency, separate triangles with adjacency, + and triangle strips with adjacency. Some of the vertices specified in + these new primitive types are not part of the ordinary primitives. + Instead, they represent neighboring vertices that are adjacent to the two + line segment end points (lines/strips) or the three triangle edges + (triangles/tstrips). These "adjacency" vertices can be accessed by + geometry programs and used to match up the outputs of the geometry program + with those of neighboring primitives. + + Additionally, geometry programs allow for layered rendering, where entire + three-dimensional, cube map, or array textures (EXT_texture_array) can be + bound to the current framebuffer. Geometry programs can use the + "result.layer" binding to select a layer or cube map face to render to. + Each primitive emitted by such a geometry program is rendered to the layer + taken from its provoking vertex. + + Since geometry programs expect a specific input primitive type, an error + will occur if the application presents primitives of a different type. For + example, if an enabled geometry program expects points, an error will + occur at Begin() time if a primitive mode of TRIANGLES is specified.
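The adjacency layout is easiest to see on an index stream: in a triangles-with-adjacency stream, every primitive occupies six indices, with the even slots holding the triangle itself and the odd slots holding the neighbouring vertices that only the geometry program can read. A small decoding sketch in plain Python (the layout follows the primitive definitions above; the sample stream is made up):

    def split_triangles_adjacency(indices):
        # Six indices per primitive: slots 0/2/4 form the triangle,
        # slots 1/3/5 are the adjacent ("neighbour") vertices.
        for k in range(0, len(indices) - 5, 6):
            p = indices[k:k + 6]
            yield (p[0], p[2], p[4]), (p[1], p[3], p[5])

    stream = [0, 10, 1, 11, 2, 12]   # triangle (0,1,2) with neighbours 10,11,12
    for tri, adj in split_triangles_adjacency(stream):
        print(tri, adj)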
+ +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/geometry_program4.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.geometry_program4 import * +from OpenGL.raw.GL.NV.geometry_program4 import _EXTENSION_NAME + +def glInitGeometryProgram4NV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/geometry_shader4.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/geometry_shader4.py new file mode 100644 index 0000000000000000000000000000000000000000..828d66b40154065326943b794ed8ea4a50ce1d87 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/geometry_shader4.py @@ -0,0 +1,37 @@ +'''OpenGL extension NV.geometry_shader4 + +This module customises the behaviour of the +OpenGL.raw.GL.NV.geometry_shader4 to provide a more +Python-friendly API + +Overview (from the spec) + + This extension builds upon the EXT_geometry_shader4 specification to + provide two additional capabilities: + + * Support for QUADS, QUAD_STRIP, and POLYGON primitive types when + geometry shaders are enabled. Such primitives will be tessellated + into individual triangles. + + * Setting the value of GEOMETRY_VERTICES_OUT_EXT will take effect + immediately. It is not necessary to link the program object in + order for this change to take effect, as is the case in the EXT + version of this extension. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/geometry_shader4.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.geometry_shader4 import * +from OpenGL.raw.GL.NV.geometry_shader4 import _EXTENSION_NAME + +def glInitGeometryShader4NV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/light_max_exponent.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/light_max_exponent.py new file mode 100644 index 0000000000000000000000000000000000000000..c4b854ae38cdef26bc2b6c9c1b8e9ecc9ba3fcdb --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/light_max_exponent.py @@ -0,0 +1,37 @@ +'''OpenGL extension NV.light_max_exponent + +This module customises the behaviour of the +OpenGL.raw.GL.NV.light_max_exponent to provide a more +Python-friendly API + +Overview (from the spec) + + Default OpenGL does not permit a shininess or spot exponent over + 128.0. This extension permits implementations to support and + advertise a maximum shininess and spot exponent beyond 128.0. + + Note that extremely high exponents for shininess and/or spot light + cutoff will require sufficiently high tessellation for acceptable + lighting results. + + Paul Diefenbach's thesis suggests that higher exponents are + necessary to approximate BRDFs with per-vertex lighting and + multiple passes.
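Why high exponents demand fine tessellation is visible in the Phong-style falloff term itself: pow(cos(theta), n) collapses to a very narrow highlight as n grows. A quick numeric illustration in plain Python (the 5-degree offset is an arbitrary sample angle):

    from math import cos, radians

    theta = radians(5)           # 5 degrees off the ideal reflection direction
    for n in (8, 128, 1024):     # 128 is the unextended-OpenGL ceiling noted above
        print(n, cos(theta) ** n)
    # n=8 keeps ~97% intensity, n=1024 keeps ~2%: per-vertex lighting on a
    # coarse mesh will simply miss such a narrow highlight between vertices.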
+ +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/light_max_exponent.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.light_max_exponent import * +from OpenGL.raw.GL.NV.light_max_exponent import _EXTENSION_NAME + +def glInitLightMaxExponentNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/packed_depth_stencil.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/packed_depth_stencil.py new file mode 100644 index 0000000000000000000000000000000000000000..fc4aedf1710611f221faa66ae72c50f5a73f8017 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/packed_depth_stencil.py @@ -0,0 +1,94 @@ +'''OpenGL extension NV.packed_depth_stencil + +This module customises the behaviour of the +OpenGL.raw.GL.NV.packed_depth_stencil to provide a more +Python-friendly API + +Overview (from the spec) + + Many OpenGL implementations have chosen to interleave the depth and + stencil buffers into one buffer, often with 24 bits of depth + precision and 8 bits of stencil data. 32 bits is more than is needed + for the depth buffer much of the time; a 24-bit depth buffer, on the + other hand, requires that reads and writes of depth data be unaligned + with respect to power-of-two boundaries. On the other hand, 8 bits + of stencil data is more than sufficient for most applications, so it + is only natural to pack the two buffers into a single buffer with + both depth and stencil data. OpenGL never provides direct access to + the buffers, so the OpenGL implementation can provide an interface to + applications where it appears the one merged buffer is composed of + two logical buffers. + + One disadvantage of this scheme is that OpenGL lacks any means by + which this packed data can be handled efficiently. For example, when + an application reads from the 24-bit depth buffer, using the type + GL_UNSIGNED_SHORT will lose 8 bits of data, while GL_UNSIGNED_INT has + 8 too many. Both require expensive format conversion operations. A + 24-bit format would be no more suitable, because it would also suffer + from the unaligned memory accesses that made the standalone 24-bit + depth buffer an unattractive proposition in the first place. + + Many applications, such as parallel rendering applications, may also + wish to draw to or read back from both the depth and stencil buffers + at the same time. Currently this requires two separate operations, + reducing performance. Since the buffers are interleaved, drawing to + or reading from both should be no more expensive than using just one; + in some cases, it may even be cheaper. + + This extension provides a new data format, GL_DEPTH_STENCIL_NV, that + can be used with the glDrawPixels, glReadPixels, and glCopyPixels + commands, as well as a packed data type, GL_UNSIGNED_INT_24_8_NV, + that is meant to be used with GL_DEPTH_STENCIL_NV. No other formats + are supported with GL_DEPTH_STENCIL_NV. If SGIX_depth_texture is + supported, GL_DEPTH_STENCIL_NV/GL_UNSIGNED_INT_24_8_NV data can also + be used for textures; this provides a more efficient way to supply + data for a 24-bit depth texture. 
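The packed layout itself is just a 32-bit word with depth in the high 24 bits and stencil in the low 8, which is what the GL_UNSIGNED_INT_24_8_NV type denotes. A plain-Python sketch of the packing arithmetic (values arbitrary):

    def pack_depth_stencil(depth, stencil):
        # depth in [0,1] quantised to 24 bits; stencil is an 8-bit value
        d24 = int(round(depth * ((1 << 24) - 1))) & 0xFFFFFF
        return (d24 << 8) | (stencil & 0xFF)

    def unpack_depth_stencil(word):
        return (word >> 8) / float((1 << 24) - 1), word & 0xFF

    w = pack_depth_stencil(0.5, 0x7F)
    print(hex(w), unpack_depth_stencil(w))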
+ + GL_DEPTH_STENCIL_NV data, when passed through the pixel path, + undergoes both depth and stencil operations. The depth data is + scaled and biased by the current GL_DEPTH_SCALE and GL_DEPTH_BIAS, + while the stencil data is shifted and offset by the current + GL_INDEX_SHIFT and GL_INDEX_OFFSET. The stencil data is also put + through the stencil-to-stencil pixel map. + + glDrawPixels of GL_DEPTH_STENCIL_NV data operates similarly to that + of GL_STENCIL_INDEX data, bypassing the OpenGL fragment pipeline + entirely, unlike the treatment of GL_DEPTH_COMPONENT data. The + stencil and depth masks are applied, as are the pixel ownership and + scissor tests, but all other operations are skipped. + + glReadPixels of GL_DEPTH_STENCIL_NV data reads back a rectangle from + both the depth and stencil buffers. + + glCopyPixels of GL_DEPTH_STENCIL_NV data copies a rectangle from + both the depth and stencil buffers. Like glDrawPixels, it applies + both the stencil and depth masks but skips the remainder of the + OpenGL fragment pipeline. + + glTex[Sub]Image[1,2,3]D of GL_DEPTH_STENCIL_NV data loads depth data + into a depth texture. glGetTexImage of GL_DEPTH_STENCIL_NV data can + be used to retrieve depth data from a depth texture. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/packed_depth_stencil.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.packed_depth_stencil import * +from OpenGL.raw.GL.NV.packed_depth_stencil import _EXTENSION_NAME + +def glInitPackedDepthStencilNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION +# Setup the new image types +from OpenGL import images +from OpenGL.raw.GL.VERSION.GL_1_1 import GL_UNSIGNED_INT +images.TYPE_TO_ARRAYTYPE[ GL_UNSIGNED_INT_24_8_NV ] = GL_UNSIGNED_INT +images.TIGHT_PACK_FORMATS[ GL_UNSIGNED_INT_24_8_NV ] = 4 +images.COMPONENT_COUNTS[ GL_DEPTH_STENCIL_NV ] = 4 diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/parameter_buffer_object.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/parameter_buffer_object.py new file mode 100644 index 0000000000000000000000000000000000000000..ac5fd7ee91212985bb84d076ca3aaf7ca62e80a1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/parameter_buffer_object.py @@ -0,0 +1,56 @@ +'''OpenGL extension NV.parameter_buffer_object + +This module customises the behaviour of the +OpenGL.raw.GL.NV.parameter_buffer_object to provide a more +Python-friendly API + +Overview (from the spec) + + This extension, in conjunction with NV_gpu_program4, provides a new type + of program parameter that can be used as a constant during vertex, + fragment, or geometry program execution. Each program target has a set of + parameter buffer binding points to which buffer objects can be attached. + + A vertex, fragment, or geometry program can read data from the attached + buffer objects using a binding of the form "program.buffer[a][b]". This + binding reads data from the buffer object attached to binding point "a". + The buffer object attached is treated either as an array of 32-bit words + or an array of four-component vectors, and the binding above reads the + array element numbered "b".
+ + The use of buffer objects allows applications to change large blocks of + program parameters at once, simply by binding a new buffer object. It + also provides a number of new ways to load parameter values, including + readback from the frame buffer (EXT_pixel_buffer_object), transform + feedback (NV_transform_feedback), buffer object loading functions such as + MapBuffer and BufferData, as well as dedicated parameter buffer update + functions provided by this extension. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/parameter_buffer_object.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.parameter_buffer_object import * +from OpenGL.raw.GL.NV.parameter_buffer_object import _EXTENSION_NAME + +def glInitParameterBufferObjectNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +# INPUT glProgramBufferParametersfvNV.params size not checked against count +glProgramBufferParametersfvNV=wrapper.wrapper(glProgramBufferParametersfvNV).setInputArraySize( + 'params', None +) +# INPUT glProgramBufferParametersIivNV.params size not checked against count +glProgramBufferParametersIivNV=wrapper.wrapper(glProgramBufferParametersIivNV).setInputArraySize( + 'params', None +) +# INPUT glProgramBufferParametersIuivNV.params size not checked against count +glProgramBufferParametersIuivNV=wrapper.wrapper(glProgramBufferParametersIuivNV).setInputArraySize( + 'params', None +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/present_video.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/present_video.py new file mode 100644 index 0000000000000000000000000000000000000000..6e5847196e3eba7b4b3805d46661c6b51220fecb --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/present_video.py @@ -0,0 +1,51 @@ +'''OpenGL extension NV.present_video + +This module customises the behaviour of the +OpenGL.raw.GL.NV.present_video to provide a more +Python-friendly API + +Overview (from the spec) + + This extension provides a mechanism for displaying textures and + renderbuffers on auxiliary video output devices. It allows an + application to specify separate buffers for the individual + fields used with interlaced output. It also provides a way + to present frames or field pairs simultaneously in two separate + video streams. It also allows an application to request when images + should be displayed, and to obtain feedback on exactly when images + are actually first displayed. + + This specification attempts to avoid language that would tie it to + any particular hardware or vendor. However, it should be noted that + it has been designed specifically for use with NVIDIA SDI products + and the features and limitations of the spec complement those of + NVIDIA's line of SDI video output devices.
+ +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/present_video.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.present_video import * +from OpenGL.raw.GL.NV.present_video import _EXTENSION_NAME + +def glInitPresentVideoNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +glGetVideoivNV=wrapper.wrapper(glGetVideoivNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetVideouivNV=wrapper.wrapper(glGetVideouivNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetVideoi64vNV=wrapper.wrapper(glGetVideoi64vNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetVideoui64vNV=wrapper.wrapper(glGetVideoui64vNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/primitive_restart.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/primitive_restart.py new file mode 100644 index 0000000000000000000000000000000000000000..d435bfeb2f76f8f657eb13a419bd0fa93bca68b4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/primitive_restart.py @@ -0,0 +1,49 @@ +'''OpenGL extension NV.primitive_restart + +This module customises the behaviour of the +OpenGL.raw.GL.NV.primitive_restart to provide a more +Python-friendly API + +Overview (from the spec) + + This extension allows applications to easily and inexpensively + restart a primitive in its middle. A "primitive restart" is simply + the same as an End command, followed by another Begin command with + the same mode as the original. The typical expected use of this + feature is to draw a mesh with many triangle strips, though primitive + restarts are legal for all primitive types, even for points (where + they are not useful). + + Although the EXT_multi_draw_arrays extension did reduce the overhead + of such drawing techniques, they still remain more expensive than one + would like. + + This extension provides an extremely lightweight primitive restart, + which is accomplished by allowing the application to choose a special + index number that signals that a primitive restart should occur, + rather than a vertex being provoked. This index can be an arbitrary + 32-bit integer for maximum application convenience. + + In addition, for full orthogonality, a special OpenGL command is + provided to restart primitives when in immediate mode. This command + is not likely to increase performance in any significant fashion, but + providing it greatly simplifies the specification and implementation + of display list compilation and indirect rendering. 
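In PyOpenGL terms, the "special index" workflow is short. A hedged sketch, assuming a current GL context, an element buffer bound and drawn elsewhere, and 0xFFFFFFFF as the (arbitrary) restart value:

    import numpy as np
    from OpenGL.GL import glEnableClientState
    from OpenGL.GL.NV.primitive_restart import (
        glInitPrimitiveRestartNV, glPrimitiveRestartIndexNV,
        GL_PRIMITIVE_RESTART_NV,
    )

    RESTART = 0xFFFFFFFF
    # two triangle strips in one index array, separated by the restart index
    indices = np.array([0, 1, 2, 3, RESTART, 4, 5, 6, 7], dtype=np.uint32)

    if glInitPrimitiveRestartNV():
        glEnableClientState(GL_PRIMITIVE_RESTART_NV)   # client state per this spec
        glPrimitiveRestartIndexNV(RESTART)
        # a single glDrawElements(GL_TRIANGLE_STRIP, ...) now draws both strips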
+ +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/primitive_restart.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.primitive_restart import * +from OpenGL.raw.GL.NV.primitive_restart import _EXTENSION_NAME + +def glInitPrimitiveRestartNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/register_combiners.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/register_combiners.py new file mode 100644 index 0000000000000000000000000000000000000000..fa516d08a504e9c1ff72f125081b9843005bd865 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/register_combiners.py @@ -0,0 +1,117 @@ +'''OpenGL extension NV.register_combiners + +This module customises the behaviour of the +OpenGL.raw.GL.NV.register_combiners to provide a more +Python-friendly API + +Overview (from the spec) + + NVIDIA's next-generation graphics processor and its derivative + designs support an extremely configurable mechanism known as "register + combiners" for computing fragment colors. + + The register combiner mechanism is a significant redesign of NVIDIA's + original TNT combiner mechanism as introduced by NVIDIA's RIVA + TNT graphics processor. Familiarity with the TNT combiners will + help the reader appreciate the greatly enhanced register combiners + functionality (see the NV_texture_env_combine4 OpenGL extension + specification for this background). The register combiner mechanism + has the following enhanced functionality: + + The numeric range of combiner computations is from [-1,1] + (instead of TNT's [0,1] numeric range), + + The set of available combiner inputs is expanded to include the + secondary color, fog color, fog factor, and a second combiner + constant color (TNT's available combiner inputs consist of + only zero, a single combiner constant color, the primary color, + texture 0, texture 1, and, in the case of combiner 1, the result + of combiner 0). + + Each combiner variable input can be independently scaled and + biased into several possible numeric ranges (TNT can only + complement combiner inputs). + + Each combiner stage computes three distinct outputs (instead + of TNT's single combiner output). + + The output operations include support for computing dot products + (TNT has no support for computing dot products). + + After each output operation, there is a configurable scale and bias + applied (TNT's combiner operations builds in a scale and/or bias + into some of its combiner operations). + + Each input variable for each combiner stage is fetched from any + entry in a combiner register set. Moreover, the outputs of each + combiner stage are written into the register set of the subsequent + combiner stage (TNT could only use the result from combiner 0 as + a possible input to combiner 1; TNT lacks the notion of an + input/output register set).
+ + The register combiner mechanism supports at least two general + combiner stages and then a special final combiner stage appropriate + for applying a color sum and fog computation (TNT provides two + simpler combiner stages, and TNT's color sum and fog stages are + hard-wired and not subsumed by the combiner mechanism as in register + combiners). + + The register combiners fit into the OpenGL pipeline as a rasterization + processing stage operating in parallel to the traditional OpenGL + texture environment, color sum, and fog application. Enabling this + extension bypasses OpenGL's existing texture environment, color + sum, and fog application processing and instead uses the register + combiners. The combiner and texture environment state is orthogonal, + so modifying combiner state does not change the traditional OpenGL + texture environment state and the texture environment state is + ignored when combiners are enabled. + + OpenGL application developers can use the register combiner mechanism + for very sophisticated shading techniques. For example, an + approximation of Blinn's bump mapping technique can be achieved with + the combiner mechanism. Additionally, multi-pass shading models + that require several passes with unextended OpenGL 1.2 functionality + can be implemented in several fewer passes with register combiners. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/register_combiners.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.register_combiners import * +from OpenGL.raw.GL.NV.register_combiners import _EXTENSION_NAME + +def glInitRegisterCombinersNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +# INPUT glCombinerParameterfvNV.params size not checked against 'pname' +glCombinerParameterfvNV=wrapper.wrapper(glCombinerParameterfvNV).setInputArraySize( + 'params', None +) +# INPUT glCombinerParameterivNV.params size not checked against 'pname' +glCombinerParameterivNV=wrapper.wrapper(glCombinerParameterivNV).setInputArraySize( + 'params', None +) +glGetCombinerInputParameterfvNV=wrapper.wrapper(glGetCombinerInputParameterfvNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetCombinerInputParameterivNV=wrapper.wrapper(glGetCombinerInputParameterivNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetCombinerOutputParameterfvNV=wrapper.wrapper(glGetCombinerOutputParameterfvNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetCombinerOutputParameterivNV=wrapper.wrapper(glGetCombinerOutputParameterivNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetFinalCombinerInputParameterfvNV=wrapper.wrapper(glGetFinalCombinerInputParameterfvNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetFinalCombinerInputParameterivNV=wrapper.wrapper(glGetFinalCombinerInputParameterivNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_buffer_load.py
b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_buffer_load.py new file mode 100644 index 0000000000000000000000000000000000000000..4d88ee5f144b723e6e4612bd6b3f3276fe8cad1e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_buffer_load.py @@ -0,0 +1,135 @@ +'''OpenGL extension NV.shader_buffer_load + +This module customises the behaviour of the +OpenGL.raw.GL.NV.shader_buffer_load to provide a more +Python-friendly API + +Overview (from the spec) + + At a very coarse level, GL has evolved in a way that allows + applications to replace many of the original state machine variables + with blocks of user-defined data. For example, the current vertex + state has been augmented by vertex buffer objects, fixed-function + shading state and parameters have been replaced by shaders/programs + and constant buffers, etc. Applications switch between coarse sets + of state by binding objects to the context or to other container + objects (e.g. vertex array objects) instead of manipulating state + variables of the context. In terms of the number of GL commands + required to draw an object, modern applications are orders of + magnitude more efficient than legacy applications, but this explosion + of objects bound to other objects has led to a new bottleneck - + pointer chasing and CPU L2 cache misses in the driver, and general + L2 cache pollution. + + This extension provides a mechanism to read from a flat, 64-bit GPU + address space from programs/shaders, to query GPU addresses of buffer + objects at the API level, and to bind buffer objects to the context in + such a way that they can be accessed via their GPU addresses in any + shader stage. + + The intent is that applications can avoid re-binding buffer objects + or updating constants between each Draw call and instead simply use + a VertexAttrib (or TexCoord, or InstanceID, or...) to "point" to the + new object's state. In this way, one of the cheapest "state" updates + (from the CPU's point of view) can be used to effect a significant + state change in the shader similarly to how a pointer change may on + the CPU. At the same time, this relieves the limits on how many + buffer objects can be accessed at once by shaders, and allows these + buffer object accesses to be exposed as C-style pointer dereferences + in the shading language. + + As a very simple example, imagine packing a group of similar objects' + constants into a single buffer object and pointing your program + at object "i" by setting "glVertexAttribI1iEXT(attrLoc, i);" + and using a shader as such: + + struct MyObjectType { + mat4x4 modelView; + vec4 materialPropertyX; + // etc. + }; + uniform MyObjectType *allObjects; + in int objectID; // bound to attrLoc + + ... + + mat4x4 thisObjectsMatrix = allObjects[objectID].modelView; + // do transform, shading, etc. + + This is beneficial in much the same way that texture arrays allow + choosing between similar, but independent, texture maps with a single + coordinate identifying which slice of the texture to use. It also + resembles instancing, where a lightweight change (incrementing the + instance ID) can be used to generate a different and interesting + result, but with additional flexibility over instancing because the + values are app-controlled and not a single incrementing counter. + + Dependent pointer fetches are allowed, so more complex scene graph + structures can be built into buffer objects providing significant new + flexibility in the use of shaders.
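A minimal PyOpenGL sketch of the address-query and residency flow that the examples here rely on (assumes a current GL context; the target and the data are placeholders; note the wrapped getter below returns its value rather than filling a C pointer):

    import ctypes
    from OpenGL.GL import (
        glGenBuffers, glBindBuffer, glBufferData,
        GL_ARRAY_BUFFER, GL_STATIC_DRAW, GL_READ_ONLY,
    )
    from OpenGL.GL.NV.shader_buffer_load import (
        glGetBufferParameterui64vNV, glMakeBufferResidentNV,
        GL_BUFFER_GPU_ADDRESS_NV,
    )

    data = (ctypes.c_float * 4)(1.0, 2.0, 3.0, 4.0)   # placeholder constants
    buf = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, buf)
    glBufferData(GL_ARRAY_BUFFER, ctypes.sizeof(data), data, GL_STATIC_DRAW)

    # query the buffer's GPU address, then pin it for shader access
    addr = glGetBufferParameterui64vNV(GL_ARRAY_BUFFER, GL_BUFFER_GPU_ADDRESS_NV)
    glMakeBufferResidentNV(GL_ARRAY_BUFFER, GL_READ_ONLY)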
Another simple example, showing + something you can't do with existing functionality, is to do dependent + fetches into many buffer objects: + + GenBuffers(N, dataBuffers); + GenBuffers(1, &pointerBuffer); + + GLuint64EXT gpuAddrs[N]; + for (i = 0; i < N; ++i) { + BindBuffer(target, dataBuffers[i]); + BufferData(target, size[i], myData[i], STATIC_DRAW); + + // get the address of this buffer and make it resident. + GetBufferParameterui64vNV(target, BUFFER_GPU_ADDRESS, + &gpuAddrs[i]); + MakeBufferResidentNV(target, READ_ONLY); + } + + GLuint64EXT pointerBufferAddr; + BindBuffer(target, pointerBuffer); + BufferData(target, sizeof(GLuint64EXT)*N, gpuAddrs, STATIC_DRAW); + GetBufferParameterui64vNV(target, BUFFER_GPU_ADDRESS, + &pointerBufferAddr); + MakeBufferResidentNV(target, READ_ONLY); + + // now in the shader, we can use a double indirection + vec4 **ptrToBuffers = pointerBufferAddr; + vec4 *ptrToBufferI = ptrToBuffers[i]; + + This allows simultaneous access to more buffers than + EXT_bindable_uniform (MAX_VERTEX_BINDABLE_UNIFORMS, etc.) and each + can be larger than MAX_BINDABLE_UNIFORM_SIZE. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/shader_buffer_load.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.shader_buffer_load import * +from OpenGL.raw.GL.NV.shader_buffer_load import _EXTENSION_NAME + +def glInitShaderBufferLoadNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +glGetBufferParameterui64vNV=wrapper.wrapper(glGetBufferParameterui64vNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetNamedBufferParameterui64vNV=wrapper.wrapper(glGetNamedBufferParameterui64vNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetIntegerui64vNV=wrapper.wrapper(glGetIntegerui64vNV).setOutput( + 'result',size=_glgets._glget_size_mapping,pnameArg='value',orPassIn=True +) +# INPUT glUniformui64vNV.value size not checked against count +glUniformui64vNV=wrapper.wrapper(glUniformui64vNV).setInputArraySize( + 'value', None +) +# OUTPUT glGetUniformui64vNV.params COMPSIZE(program,location) +# INPUT glProgramUniformui64vNV.value size not checked against count +glProgramUniformui64vNV=wrapper.wrapper(glProgramUniformui64vNV).setInputArraySize( + 'value', None +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_thread_group.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_thread_group.py new file mode 100644 index 0000000000000000000000000000000000000000..824451597264ef60b4b2b1691222ce96776e9114 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/shader_thread_group.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.shader_thread_group + +This module customises the behaviour of the +OpenGL.raw.GL.NV.shader_thread_group to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/shader_thread_group.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.shader_thread_group import * +from OpenGL.raw.GL.NV.shader_thread_group import
_EXTENSION_NAME + +def glInitShaderThreadGroupNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/tessellation_program5.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/tessellation_program5.py new file mode 100644 index 0000000000000000000000000000000000000000..9e1270c7d5e4ae4682db0012e16db92da7c4f7f0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/tessellation_program5.py @@ -0,0 +1,99 @@ +'''OpenGL extension NV.tessellation_program5 + +This module customises the behaviour of the +OpenGL.raw.GL.NV.tessellation_program5 to provide a more +Python-friendly API + +Overview (from the spec) + + This extension, in conjunction with the ARB_tessellation_shader extension, + introduces a new tessellation stage to the OpenGL primitive processing + pipeline. The ARB_tessellation_shader extension provides programmable + shading functionality using the OpenGL Shading Language as its base; this + extension provides assembly programmable shaders building on the family of + assembly programmability extensions including ARB_vertex_program, + ARB_fragment_program, NV_gpu_program4, and NV_geometry_program4. + + This extension adds a new basic primitive type, called a patch, which + consists of an array of vertices plus some associated per-patch state. It + also adds two new assembly program types: a tessellation control program + that transforms a patch into a new patch and a tessellation evaluation + program that computes the position and attributes of each vertex produced + by the tessellator. + + When tessellation is active, it begins by running the optional + tessellation control program, if enabled. This program consumes a + variable-size input patch and produces a new fixed-size output patch. The + output patch consists of an array of vertices, and a set of per-patch + attributes. The per-patch attributes include tessellation levels that + control how finely the patch will be tessellated. For each patch + processed, multiple tessellation control program invocations are performed + -- one per output patch vertex. Each tessellation control program + invocation writes all the attributes of its corresponding output patch + vertex. A tessellation control program may also read the per-vertex + outputs of other tessellation control program invocations, as well as read + and write shared per-patch outputs. The tessellation control program + invocations for a single patch effectively run as a group. The GL + automatically synchronizes threads to ensure that when executing a given + instruction, all previous instructions have completed for all program + invocations in the group. + + The tessellation primitive generator then decomposes a patch into a new + set of primitives using the tessellation levels to determine how finely + tessellated the output should be. The primitive generator begins with + either a triangle or a quad, and splits each outer edge of the primitive + into a number of segments approximately equal to the corresponding element + of the outer tessellation level array. The interior of the primitive is + tessellated according to elements of the inner tessellation level array.
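In other words, the level arrays directly drive how many segments each edge receives. A toy illustration of integer versus fractional levels in plain Python (the rounding here is a deliberate simplification of the spec's spacing rules, shown only to convey why fractional levels permit smooth level-of-detail changes):

    import math

    def edge_segments(level, fractional=False):
        # Integer spacing snaps to whole segment counts; fractional spacing
        # lets the count (and so the LOD) vary smoothly with the level.
        level = max(1.0, level)
        return level if fractional else math.ceil(level)

    for lvl in (1.0, 2.2, 2.8, 4.0):
        print(lvl, edge_segments(lvl), edge_segments(lvl, fractional=True))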
+ The primitive generator has three modes: TRIANGLES and QUADS split a + triangular or quad-shaped patch into a set of triangles that cover the + original patch; ISOLINES_NV splits a quad-shaped patch into a set of line + strips spanning the patch. Each vertex generated by the tessellation + primitive generator is assigned a (u,v) or (u,v,w) coordinate indicating + its relative location in the subdivided triangle or quad. + + For each vertex produced by the tessellation primitive generator, the + tessellation evaluation program is run to compute its position and other + attributes of the vertex, using its (u,v) or (u,v,w) coordinate. When + computing the final vertex attributes, the tessellation evaluation program + can also read the attributes of any of the vertices of the patch written + by the tessellation control program. Tessellation evaluation program + invocations are completely independent, although all invocations for a + single patch share the same collection of input vertices and per-patch + attributes. + + The tessellator operates on vertices after they have been transformed by a + vertex program or fixed-function vertex processing. The primitives + generated by the tessellator are passed further down the OpenGL pipeline, + where they can be used as inputs to geometry programs, transform feedback, + and the rasterizer. + + The tessellation control and evaluation programs are both optional. If + neither program type is present, the tessellation stage has no effect. If + no tessellation control program is present, the input patch provided by + the application is passed directly to the tessellation primitive + generator, and a set of fixed tessellation level parameters (specified via + the PatchParameterfv function) is used to control primitive generation. + If no tessellation evaluation program is present, the output patch + produced by the tessellation control program is passed as a patch to + subsequent pipeline stages, where it can be consumed by geometry programs, + transform feedback, or the rasterizer. + + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/tessellation_program5.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.tessellation_program5 import * +from OpenGL.raw.GL.NV.tessellation_program5 import _EXTENSION_NAME + +def glInitTessellationProgram5NV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader2.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader2.py new file mode 100644 index 0000000000000000000000000000000000000000..38a2f259b0fe08ccb20522fafc86c236d7d78a99 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader2.py @@ -0,0 +1,45 @@ +'''OpenGL extension NV.texture_shader2 + +This module customises the behaviour of the +OpenGL.raw.GL.NV.texture_shader2 to provide a more +Python-friendly API + +Overview (from the spec) + + This extension extends the NV_texture_shader functionality to + support texture shader operations for 3D textures. + + See the NV_texture_shader extension for information about the + texture shader operational model. + + The two new texture shader operations are: + + + + 22. 
TEXTURE_3D - Accesses a 3D texture via (s/q,t/q,r/q). + + + + 23. DOT_PRODUCT_TEXTURE_3D_NV - When preceded by two DOT_PRODUCT_NV + programs in the previous two texture shader stages, computes a + third similar dot product and composes the three dot products + into (s,t,r) texture coordinate set to access a 3D non-projective + texture. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/texture_shader2.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.texture_shader2 import * +from OpenGL.raw.GL.NV.texture_shader2 import _EXTENSION_NAME + +def glInitTextureShader2NV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader3.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader3.py new file mode 100644 index 0000000000000000000000000000000000000000..7644ff4b1733ddf38e261bfa0e265a5a98659384 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/texture_shader3.py @@ -0,0 +1,118 @@ +'''OpenGL extension NV.texture_shader3 + +This module customises the behaviour of the +OpenGL.raw.GL.NV.texture_shader3 to provide a more +Python-friendly API + +Overview (from the spec) + + NV_texture_shader3 extends the NV_texture_shader functionality by + adding several new texture shader operations, extending several + existing texture shader operations, adding a new HILO8 internal + format, and adding new and more flexible re-mapping modes for dot + product and dependent texture shader operations. + + See the NV_texture_shader extension for information about the + texture shader operational model. + + The fourteen new texture shader operations are: + + + + 24. OFFSET_PROJECTIVE_TEXTURE_2D_NV - Transforms the signed (ds,dt) + components of a previous texture unit by a 2x2 floating-point + matrix and then uses the result to offset the stage's texture + coordinates for a 2D non-projective texture. + + 25. OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV - Same as above except + the magnitude component of the previous texture unit result + scales the red, green, and blue components of the unsigned RGBA + texture 2D access. + + 26. OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV - Similar to + OFFSET_TEXTURE_2D_NV except that the texture access is into a + rectangular non-projective texture. + + 27. OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV - Similar to + OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV except that the texture + access is into a rectangular non-projective texture. + + 28. OFFSET_HILO_TEXTURE_2D_NV - Similar to OFFSET_TEXTURE_2D_NV + but uses a (higher-precision) HILO base format texture rather + than a DSDT-type base format. + + 29. OFFSET_HILO_TEXTURE_RECTANGLE_NV - Similar to + OFFSET_TEXTURE_RECTANGLE_NV but uses a (higher-precision) + HILO base format texture rather than a DSDT-type base format. + + 30. OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV - Similar to + OFFSET_PROJECTIVE_TEXTURE_2D_NV but uses a (higher-precision) + HILO base format texture rather than a DSDT-type base format. + + 31. OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV - Similar to + OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV but uses a + (higher-precision) HILO base format texture rather than a + DSDT-type base format. 
+ + (There are no "offset HILO texture scale" operations because + HILO textures have only two components with no third component + for scaling.) + + + + 32. DEPENDENT_HILO_TEXTURE_2D_NV - Converts the hi and lo components + of a previous shader HILO result into an (s,t) texture coordinate + set to access a 2D non-projective texture. + + 33. DEPENDENT_RGB_TEXTURE_3D_NV - Converts the red, green, and + blue components of a previous shader RGBA result into an (s,t,r) + texture coordinate set to access a 3D non-projective texture. + + 34. DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV - Converts the red, green, + and blue components of a previous shader RGBA result into an + (s,t,r) texture coordinate set to access a cube map texture. + + + + 35. DOT_PRODUCT_PASS_THROUGH_NV - Computes a dot product in the + manner of the DOT_PRODUCT_NV operation and the result is [0,1] + clamped and smeared to generate the texture unit RGBA result. + + + + 36. DOT_PRODUCT_TEXTURE_1D_NV - Computes a dot product in the manner + of the DOT_PRODUCT_NV operation and uses the result as the s + texture coordinate to access a 1D non-projective texture. + + + + 37. DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV - Computes a dot product + in the manner of the DOT_PRODUCT_NV operation and the result + is [0,1] clamped and replaces the fragment's window-space + depth value. The texture unit RGBA result is (0,0,0,0). + + Two new internal texture formats have been added: HILO8_NV and + SIGNED_HILO8_NV. These texture formats allow HILO textures to be + stored in half the space; filtering for these internal texture + formats is still done with 16-bit precision. + + One new unsigned RGBA dot product mapping mode (FORCE_BLUE_TO_ONE_NV) + forces the blue component to be 1.0 before computing a dot product. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/texture_shader3.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.texture_shader3 import * +from OpenGL.raw.GL.NV.texture_shader3 import _EXTENSION_NAME + +def glInitTextureShader3NV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/transform_feedback.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/transform_feedback.py new file mode 100644 index 0000000000000000000000000000000000000000..c287a91b92651a21f7b67acfda03106372f04e36 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/transform_feedback.py @@ -0,0 +1,90 @@ +'''OpenGL extension NV.transform_feedback + +This module customises the behaviour of the +OpenGL.raw.GL.NV.transform_feedback to provide a more +Python-friendly API + +Overview (from the spec) + + This extension provides a new mode to the GL, called transform feedback, + which records vertex attributes of the primitives processed by the GL. + The selected attributes are written into buffer objects, and can be + written with each attribute in a separate buffer object or with all + attributes interleaved into a single buffer object. If a geometry program + or shader is active, the primitives recorded are those emitted by the + geometry program.
Otherwise, transform feedback captures primitives whose + vertices are transformed by a vertex program or shader, or by fixed-function + vertex processing. In either case, the primitives captured are those + generated prior to clipping. Transform feedback mode is capable of + capturing transformed vertex data generated by fixed-function vertex + processing, outputs from assembly vertex or geometry programs, or varying + variables emitted from GLSL vertex or geometry shaders. + + The vertex data recorded in transform feedback mode is stored into buffer + objects as an array of vertex attributes. The regular representation and + the use of buffer objects allow the recorded data to be processed + directly by the GL without requiring CPU intervention to copy data. In + particular, transform feedback data can be used for vertex arrays (via + vertex buffer objects), as the source for pixel data (via pixel buffer + objects), as program constant data (via the NV_parameter_buffer_object or + EXT_bindable_uniform extension), or via any other extension that makes use + of buffer objects. + + This extension introduces new query object support to allow transform + feedback mode to operate asynchronously. Query objects allow applications + to determine when transform feedback results are complete, as well as the + number of primitives processed and written back to buffer objects while in + transform feedback mode. This extension also provides a new rasterizer + discard enable, which allows applications to use transform feedback to + capture vertex attributes without rendering anything. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/transform_feedback.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.transform_feedback import * +from OpenGL.raw.GL.NV.transform_feedback import _EXTENSION_NAME + +def glInitTransformFeedbackNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +# INPUT glTransformFeedbackAttribsNV.attribs size not checked against 'count' +glTransformFeedbackAttribsNV=wrapper.wrapper(glTransformFeedbackAttribsNV).setInputArraySize( + 'attribs', None +) +# INPUT glTransformFeedbackVaryingsNV.locations size not checked against count +glTransformFeedbackVaryingsNV=wrapper.wrapper(glTransformFeedbackVaryingsNV).setInputArraySize( + 'locations', None +) +# INPUT glActiveVaryingNV.name size not checked against 'name' +glActiveVaryingNV=wrapper.wrapper(glActiveVaryingNV).setInputArraySize( + 'name', None +) +# INPUT glGetVaryingLocationNV.name size not checked against 'name' +glGetVaryingLocationNV=wrapper.wrapper(glGetVaryingLocationNV).setInputArraySize( + 'name', None +) +# OUTPUT glGetActiveVaryingNV.name COMPSIZE(program,index,bufSize) +glGetActiveVaryingNV=wrapper.wrapper(glGetActiveVaryingNV).setOutput( + 'length',size=(1,),orPassIn=True +).setOutput( + 'type',size=(1,),orPassIn=True +).setOutput( + 'size',size=(1,),orPassIn=True +) +glGetTransformFeedbackVaryingNV=wrapper.wrapper(glGetTransformFeedbackVaryingNV).setOutput( + 'location',size=(1,),orPassIn=True +) +# INPUT glTransformFeedbackStreamAttribsNV.attribs size not checked against count +# INPUT glTransformFeedbackStreamAttribsNV.bufstreams size not checked against nbuffers
+glTransformFeedbackStreamAttribsNV=wrapper.wrapper(glTransformFeedbackStreamAttribsNV).setInputArraySize( + 'attribs', None +).setInputArraySize( + 'bufstreams', None +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_attrib_integer_64bit.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_attrib_integer_64bit.py new file mode 100644 index 0000000000000000000000000000000000000000..5c408a3c457ed4f5f95b2f5b7612ef5b2fbec872 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_attrib_integer_64bit.py @@ -0,0 +1,64 @@ +'''OpenGL extension NV.vertex_attrib_integer_64bit + +This module customises the behaviour of the +OpenGL.raw.GL.NV.vertex_attrib_integer_64bit to provide a more +Python-friendly API + +Overview (from the spec) + + This extension provides support for specifying vertex attributes with + 64-bit integer components, analogous to the 64-bit floating-point support + added in EXT_vertex_attrib_64bit. + + Additionally, it provides the VertexAttribLFormatNV entry point to specify + bindless vertex attribute arrays with 64-bit integer or floating-point + components in conjunction with the NV_vertex_buffer_unified_memory + extension. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/vertex_attrib_integer_64bit.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.vertex_attrib_integer_64bit import * +from OpenGL.raw.GL.NV.vertex_attrib_integer_64bit import _EXTENSION_NAME + +def glInitVertexAttribInteger64BitNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +glVertexAttribL1i64vNV=wrapper.wrapper(glVertexAttribL1i64vNV).setInputArraySize( + 'v', 1 +) +glVertexAttribL2i64vNV=wrapper.wrapper(glVertexAttribL2i64vNV).setInputArraySize( + 'v', 2 +) +glVertexAttribL3i64vNV=wrapper.wrapper(glVertexAttribL3i64vNV).setInputArraySize( + 'v', 3 +) +glVertexAttribL4i64vNV=wrapper.wrapper(glVertexAttribL4i64vNV).setInputArraySize( + 'v', 4 +) +glVertexAttribL1ui64vNV=wrapper.wrapper(glVertexAttribL1ui64vNV).setInputArraySize( + 'v', 1 +) +glVertexAttribL2ui64vNV=wrapper.wrapper(glVertexAttribL2ui64vNV).setInputArraySize( + 'v', 2 +) +glVertexAttribL3ui64vNV=wrapper.wrapper(glVertexAttribL3ui64vNV).setInputArraySize( + 'v', 3 +) +glVertexAttribL4ui64vNV=wrapper.wrapper(glVertexAttribL4ui64vNV).setInputArraySize( + 'v', 4 +) +glGetVertexAttribLi64vNV=wrapper.wrapper(glGetVertexAttribLi64vNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +glGetVertexAttribLui64vNV=wrapper.wrapper(glGetVertexAttribLui64vNV).setOutput( + 'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_buffer_unified_memory.py b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_buffer_unified_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd5a5ba44c47da6d2913ab8aaaa3db5078ff84a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GL/NV/vertex_buffer_unified_memory.py @@ -0,0 +1,37 @@ +'''OpenGL extension NV.vertex_buffer_unified_memory + +This module customises the behaviour of the
+OpenGL.raw.GL.NV.vertex_buffer_unified_memory to provide a more +Python-friendly API + +Overview (from the spec) + + This extension provides a mechanism to specify vertex attrib and + element array locations using GPU addresses. + + Binding vertex buffers is one of the most frequent and expensive + operations in many GL applications, due to the cost of chasing + pointers and binding objects described in the Overview of + NV_shader_buffer_load. The intent of this extension is to provide a + way for the application to specify vertex attrib state that alleviates + the overhead of object binds and driver memory management. + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/vertex_buffer_unified_memory.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GL import _types, _glgets +from OpenGL.raw.GL.NV.vertex_buffer_unified_memory import * +from OpenGL.raw.GL.NV.vertex_buffer_unified_memory import _EXTENSION_NAME + +def glInitVertexBufferUnifiedMemoryNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +glGetIntegerui64i_vNV=wrapper.wrapper(glGetIntegerui64i_vNV).setOutput( + 'result',size=_glgets._glget_size_mapping,pnameArg='value',orPassIn=True +) +### END AUTOGENERATED SECTION \ No newline at end of file
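The three modules added above share the same autogenerated shape: each re-exports the raw bindings, defines a glInit*NV probe built on extensions.hasGLExtension, and wraps array-taking entry points with explicit size annotations. A minimal usage sketch follows, assuming a current GL context already exists (for example one created with GLUT or GLFW) before any extension query is made; without a context the probes report False. The probe_nv_extensions helper name is hypothetical, introduced only for illustration.

    # Sketch: runtime availability check for the NV extensions added in this diff.
    # Assumes a current GL context; each glInit*NV helper simply wraps
    # extensions.hasGLExtension(_EXTENSION_NAME) and returns a boolean.
    from OpenGL.GL.NV.transform_feedback import glInitTransformFeedbackNV
    from OpenGL.GL.NV.vertex_attrib_integer_64bit import glInitVertexAttribInteger64BitNV
    from OpenGL.GL.NV.vertex_buffer_unified_memory import glInitVertexBufferUnifiedMemoryNV

    def probe_nv_extensions():
        # Returns a mapping from extension name to availability, so callers can
        # guard use of the wrapped entry points (e.g. glTransformFeedbackVaryingsNV).
        return {
            'GL_NV_transform_feedback': glInitTransformFeedbackNV(),
            'GL_NV_vertex_attrib_integer_64bit': glInitVertexAttribInteger64BitNV(),
            'GL_NV_vertex_buffer_unified_memory': glInitVertexBufferUnifiedMemoryNV(),
        }

Calling probe_nv_extensions() after context creation and branching on the result is the usual pattern for these vendor extensions, since none of them is guaranteed to be present outside NVIDIA drivers.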