diff --git a/parrot/share/terminfo/w/wy350-vb b/parrot/share/terminfo/w/wy350-vb new file mode 100644 index 0000000000000000000000000000000000000000..893e2cdc814113d69d81326d4cb7ac8c7fed742d Binary files /dev/null and b/parrot/share/terminfo/w/wy350-vb differ diff --git a/parrot/share/terminfo/w/wy520-24 b/parrot/share/terminfo/w/wy520-24 new file mode 100644 index 0000000000000000000000000000000000000000..9bfbec7c3bf2f2f402d455d816db0dad483a9a8c Binary files /dev/null and b/parrot/share/terminfo/w/wy520-24 differ diff --git a/parrot/share/terminfo/w/wy520-36 b/parrot/share/terminfo/w/wy520-36 new file mode 100644 index 0000000000000000000000000000000000000000..ed9343203a286aa6cdbbac6ee19fea37c61e42a6 Binary files /dev/null and b/parrot/share/terminfo/w/wy520-36 differ diff --git a/parrot/share/terminfo/w/wy520-36wpc b/parrot/share/terminfo/w/wy520-36wpc new file mode 100644 index 0000000000000000000000000000000000000000..fffaa926d66a78ee6bcc092ac2da2b37721746dd Binary files /dev/null and b/parrot/share/terminfo/w/wy520-36wpc differ diff --git a/parrot/share/terminfo/w/wy60-25 b/parrot/share/terminfo/w/wy60-25 new file mode 100644 index 0000000000000000000000000000000000000000..d8e256ec10340bfbecbae203d989a950988d50af Binary files /dev/null and b/parrot/share/terminfo/w/wy60-25 differ diff --git a/parrot/share/terminfo/w/wy60-vb b/parrot/share/terminfo/w/wy60-vb new file mode 100644 index 0000000000000000000000000000000000000000..cb70b612b6bdc81aad20f2094e1f1d3f530980c1 Binary files /dev/null and b/parrot/share/terminfo/w/wy60-vb differ diff --git a/parrot/share/terminfo/w/wy75-vb b/parrot/share/terminfo/w/wy75-vb new file mode 100644 index 0000000000000000000000000000000000000000..764d030f7b028f4531ff95443af3d05ea83e2967 Binary files /dev/null and b/parrot/share/terminfo/w/wy75-vb differ diff --git a/parrot/share/terminfo/w/wy85-w b/parrot/share/terminfo/w/wy85-w new file mode 100644 index 0000000000000000000000000000000000000000..4a17128f703ade4bfb6559234408c832a0368e2d Binary files /dev/null and b/parrot/share/terminfo/w/wy85-w differ diff --git a/parrot/share/terminfo/w/wyse50-w b/parrot/share/terminfo/w/wyse50-w new file mode 100644 index 0000000000000000000000000000000000000000..85e8dd395de0b83431583b4280613c6ab62f3318 Binary files /dev/null and b/parrot/share/terminfo/w/wyse50-w differ diff --git a/parrot/share/terminfo/w/wyse520-36pc b/parrot/share/terminfo/w/wyse520-36pc new file mode 100644 index 0000000000000000000000000000000000000000..5048dfe740544f1984515cd71ce5e6e54d70db0a Binary files /dev/null and b/parrot/share/terminfo/w/wyse520-36pc differ diff --git a/parrot/share/terminfo/w/wyse60-43 b/parrot/share/terminfo/w/wyse60-43 new file mode 100644 index 0000000000000000000000000000000000000000..75d94671a07c2cffb5af0562b6dfd455aec5e914 Binary files /dev/null and b/parrot/share/terminfo/w/wyse60-43 differ diff --git a/parrot/share/terminfo/w/wyse60-vb b/parrot/share/terminfo/w/wyse60-vb new file mode 100644 index 0000000000000000000000000000000000000000..cb70b612b6bdc81aad20f2094e1f1d3f530980c1 Binary files /dev/null and b/parrot/share/terminfo/w/wyse60-vb differ diff --git a/parrot/share/terminfo/w/wyse75-vb b/parrot/share/terminfo/w/wyse75-vb new file mode 100644 index 0000000000000000000000000000000000000000..764d030f7b028f4531ff95443af3d05ea83e2967 Binary files /dev/null and b/parrot/share/terminfo/w/wyse75-vb differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc 
b/videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..351748ef5ec0f69dcfb54e58d291b003c840d5fc Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55cac3f64e45278d0e424b6fc82fd53bf468f195 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py b/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/astype.py b/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/astype.py new file mode 100644 index 0000000000000000000000000000000000000000..f5579082c679bf131c056f3f2029b2485e88bd0d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/astype.py @@ -0,0 +1,301 @@ +""" +Functions for implementing 'astype' methods according to pandas conventions, +particularly ones that differ from numpy. +""" +from __future__ import annotations + +import inspect +from typing import ( + TYPE_CHECKING, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.errors import IntCastingNaNError + +from pandas.core.dtypes.common import ( + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + NumpyEADtype, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + DtypeObj, + IgnoreRaise, + ) + + from pandas.core.arrays import ExtensionArray + +_dtype_obj = np.dtype(object) + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ... +) -> np.ndarray: + ... + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ... +) -> ExtensionArray: + ... + + +def _astype_nansafe( + arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False +) -> ArrayLike: + """ + Cast the elements of an array to a given dtype a nan-safe manner. + + Parameters + ---------- + arr : ndarray + dtype : np.dtype or ExtensionDtype + copy : bool, default True + If False, a view will be attempted but may fail, if + e.g. the item sizes don't align. + skipna: bool, default False + Whether or not we should skip NaN when casting as a string-type. + + Raises + ------ + ValueError + The dtype was a datetime64/timedelta64 dtype, but it had no unit. 
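# Illustrative example (not part of the diffed file): the two failure modes
# documented in the docstring above, as they surface through the public
# Series.astype API. Messages are taken from the code shown here; behaviour is
# assumed for current pandas releases.
import numpy as np
import pandas as pd

try:
    pd.Series([1.5, np.nan]).astype("int64")   # NaN has no integer representation
except ValueError as exc:                      # IntCastingNaNError subclasses ValueError
    print(exc)                                 # Cannot convert non-finite values (NA or inf) to integer

try:
    pd.Series([1, 2]).astype("datetime64")     # unit-less datetime64 dtype
except ValueError as exc:
    print(exc)                                 # The 'datetime64' dtype has no unit ...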
+ """ + + # dispatch on extension dtype if needed + if isinstance(dtype, ExtensionDtype): + return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy) + + elif not isinstance(dtype, np.dtype): # pragma: no cover + raise ValueError("dtype must be np.dtype or ExtensionDtype") + + if arr.dtype.kind in "mM": + from pandas.core.construction import ensure_wrapped_if_datetimelike + + arr = ensure_wrapped_if_datetimelike(arr) + res = arr.astype(dtype, copy=copy) + return np.asarray(res) + + if issubclass(dtype.type, str): + shape = arr.shape + if arr.ndim > 1: + arr = arr.ravel() + return lib.ensure_string_array( + arr, skipna=skipna, convert_na_value=False + ).reshape(shape) + + elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu": + return _astype_float_to_int_nansafe(arr, dtype, copy) + + elif arr.dtype == object: + # if we have a datetime/timedelta array of objects + # then coerce to datetime64[ns] and use DatetimeArray.astype + + if lib.is_np_dtype(dtype, "M"): + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray._from_sequence(arr, dtype=dtype) + return dta._ndarray + + elif lib.is_np_dtype(dtype, "m"): + from pandas.core.construction import ensure_wrapped_if_datetimelike + + # bc we know arr.dtype == object, this is equivalent to + # `np.asarray(to_timedelta(arr))`, but using a lower-level API that + # does not require a circular import. + tdvals = array_to_timedelta64(arr).view("m8[ns]") + + tda = ensure_wrapped_if_datetimelike(tdvals) + return tda.astype(dtype, copy=False)._ndarray + + if dtype.name in ("datetime64", "timedelta64"): + msg = ( + f"The '{dtype.name}' dtype has no unit. Please pass in " + f"'{dtype.name}[ns]' instead." + ) + raise ValueError(msg) + + if copy or arr.dtype == object or dtype == object: + # Explicit copy, or required since NumPy can't view from / to object. + return arr.astype(dtype, copy=True) + + return arr.astype(dtype, copy=copy) + + +def _astype_float_to_int_nansafe( + values: np.ndarray, dtype: np.dtype, copy: bool +) -> np.ndarray: + """ + astype with a check preventing converting NaN to an meaningless integer value. + """ + if not np.isfinite(values).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + if dtype.kind == "u": + # GH#45151 + if not (values >= 0).all(): + raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}") + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + return values.astype(dtype, copy=copy) + + +def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : dtype object + copy : bool, default False + copy if indicated + + Returns + ------- + ndarray or ExtensionArray + """ + if values.dtype == dtype: + if copy: + return values.copy() + return values + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + values = values.astype(dtype, copy=copy) + + else: + values = _astype_nansafe(values, dtype, copy=copy) + + # in pandas we don't store numpy str dtypes, so convert to object + if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + return values + + +def astype_array_safe( + values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise" +) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. 
+ + This basically is the implementation for DataFrame/Series.astype and + includes all custom logic for pandas (NaN-safety, converting str to object, + not allowing ) + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : str, dtype convertible + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + ndarray or ExtensionArray + """ + errors_legal_values = ("raise", "ignore") + + if errors not in errors_legal_values: + invalid_arg = ( + "Expected value of kwarg 'errors' to be one of " + f"{list(errors_legal_values)}. Supplied value is '{errors}'" + ) + raise ValueError(invalid_arg) + + if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): + msg = ( + f"Expected an instance of {dtype.__name__}, " + "but got the class instead. Try instantiating 'dtype'." + ) + raise TypeError(msg) + + dtype = pandas_dtype(dtype) + if isinstance(dtype, NumpyEADtype): + # Ensure we don't end up with a NumpyExtensionArray + dtype = dtype.numpy_dtype + + try: + new_values = astype_array(values, dtype, copy=copy) + except (ValueError, TypeError): + # e.g. _astype_nansafe can fail on object-dtype of strings + # trying to convert to float + if errors == "ignore": + new_values = values + else: + raise + + return new_values + + +def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool: + """Checks if astype avoided copying the data. + + Parameters + ---------- + dtype : Original dtype + new_dtype : target dtype + + Returns + ------- + True if new data is a view or not guaranteed to be a copy, False otherwise + """ + if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype): + new_dtype, dtype = dtype, new_dtype + + if dtype == new_dtype: + return True + + elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype): + # Only equal numpy dtypes avoid a copy + return False + + elif is_string_dtype(dtype) and is_string_dtype(new_dtype): + # Potentially! a view when converting from object to string + return True + + elif is_object_dtype(dtype) and new_dtype.kind == "O": + # When the underlying array has dtype object, we don't have to make a copy + return True + + elif dtype.kind in "mM" and new_dtype.kind in "mM": + dtype = getattr(dtype, "numpy_dtype", dtype) + new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype) + return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None) + + numpy_dtype = getattr(dtype, "numpy_dtype", None) + new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None) + + if numpy_dtype is None and isinstance(dtype, np.dtype): + numpy_dtype = dtype + + if new_numpy_dtype is None and isinstance(new_dtype, np.dtype): + new_numpy_dtype = new_dtype + + if numpy_dtype is not None and new_numpy_dtype is not None: + # if both have NumPy dtype or one of them is a numpy dtype + # they are only a view when the numpy dtypes are equal, e.g. 
+ # int64 -> Int64 or int64[pyarrow] + # int64 -> Int32 copies + return numpy_dtype == new_numpy_dtype + + # Assume this is a view since we don't know for sure if a copy was made + return True diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/cast.py b/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/cast.py new file mode 100644 index 0000000000000000000000000000000000000000..7dd81ec59bc4974069a9efd353a6f46ecf21c621 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/cast.py @@ -0,0 +1,1973 @@ +""" +Routines for casting. +""" + +from __future__ import annotations + +import datetime as dt +import functools +from typing import ( + TYPE_CHECKING, + Any, + Literal, + TypeVar, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import ( + Interval, + Period, + lib, +) +from pandas._libs.missing import ( + NA, + NAType, + checknull, +) +from pandas._libs.tslibs import ( + NaT, + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + Timedelta, + Timestamp, + is_supported_dtype, +) +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.compat.numpy import np_version_gt2 +from pandas.errors import ( + IntCastingNaNError, + LossySetitemError, +) + +from pandas.core.dtypes.common import ( + ensure_int8, + ensure_int16, + ensure_int32, + ensure_int64, + ensure_object, + ensure_str, + is_bool, + is_complex, + is_float, + is_integer, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype as pandas_dtype_func, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + BaseMaskedDtype, + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PandasExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import is_list_like +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, + notna, +) + +from pandas.io._util import _arrow_dtype_mapping + +if TYPE_CHECKING: + from collections.abc import ( + Sequence, + Sized, + ) + + from pandas._typing import ( + ArrayLike, + Dtype, + DtypeObj, + NumpyIndexT, + Scalar, + npt, + ) + + from pandas import Index + from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, + ) + + +_int8_max = np.iinfo(np.int8).max +_int16_max = np.iinfo(np.int16).max +_int32_max = np.iinfo(np.int32).max + +_dtype_obj = np.dtype(object) + +NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray) + + +def maybe_convert_platform( + values: list | tuple | range | np.ndarray | ExtensionArray, +) -> ArrayLike: + """try to do platform conversion, allow ndarray or list here""" + arr: ArrayLike + + if isinstance(values, (list, tuple, range)): + arr = construct_1d_object_array_from_listlike(values) + else: + # The caller is responsible for ensuring that we have np.ndarray + # or ExtensionArray here. + arr = values + + if arr.dtype == _dtype_obj: + arr = cast(np.ndarray, arr) + arr = lib.maybe_convert_objects(arr) + + return arr + + +def is_nested_object(obj) -> bool: + """ + return a boolean if we have a nested object, e.g. a Series with 1 or + more Series elements + + This may not be necessarily be performant. 
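# Illustrative example (not part of the diffed file): a "nested object" in the
# sense checked by is_nested_object is an object-dtype Series whose elements
# are themselves Series.
import pandas as pd

nested = pd.Series([pd.Series([1, 2]), pd.Series([3])])
flat = pd.Series([1, 2, 3])
print(nested.dtype)   # object, and every element is a Series
print(flat.dtype)     # int64, not nested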
+ + """ + return bool( + isinstance(obj, ABCSeries) + and is_object_dtype(obj.dtype) + and any(isinstance(v, ABCSeries) for v in obj._values) + ) + + +def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar: + """ + Cast scalar to Timestamp or Timedelta if scalar is datetime-like + and dtype is not object. + + Parameters + ---------- + value : scalar + dtype : Dtype, optional + + Returns + ------- + scalar + """ + if dtype == _dtype_obj: + pass + elif isinstance(value, (np.datetime64, dt.datetime)): + value = Timestamp(value) + elif isinstance(value, (np.timedelta64, dt.timedelta)): + value = Timedelta(value) + + return value + + +def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType: + """ + If passed a scalar cast the scalar to a python native type. + + Parameters + ---------- + value : scalar or Series + + Returns + ------- + scalar or Series + """ + if is_float(value): + value = float(value) + elif is_integer(value): + value = int(value) + elif is_bool(value): + value = bool(value) + elif isinstance(value, (np.datetime64, np.timedelta64)): + value = maybe_box_datetimelike(value) + elif value is NA: + value = None + return value + + +def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar: + """ + Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting + into a numpy array. Failing to unbox would risk dropping nanoseconds. + + Notes + ----- + Caller is responsible for checking dtype.kind in "mM" + """ + if is_valid_na_for_dtype(value, dtype): + # GH#36541: can't fill array directly with pd.NaT + # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT) + # ValueError: cannot convert float NaN to integer + value = dtype.type("NaT", "ns") + elif isinstance(value, Timestamp): + if value.tz is None: + value = value.to_datetime64() + elif not isinstance(dtype, DatetimeTZDtype): + raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype") + elif isinstance(value, Timedelta): + value = value.to_timedelta64() + + _disallow_mismatched_datetimelike(value, dtype) + return value + + +def _disallow_mismatched_datetimelike(value, dtype: DtypeObj): + """ + numpy allows np.array(dt64values, dtype="timedelta64[ns]") and + vice-versa, but we do not want to allow this, so we need to + check explicitly + """ + vdtype = getattr(value, "dtype", None) + if vdtype is None: + return + elif (vdtype.kind == "m" and dtype.kind == "M") or ( + vdtype.kind == "M" and dtype.kind == "m" + ): + raise TypeError(f"Cannot cast {repr(value)} to {dtype}") + + +@overload +def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray: + ... + + +@overload +def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike: + ... + + +def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike: + """ + try to cast to the specified dtype (e.g. 
convert back to bool/int + or could be an astype of float64->float32 + """ + if isinstance(result, ABCSeries): + result = result._values + do_round = False + + if isinstance(dtype, str): + if dtype == "infer": + inferred_type = lib.infer_dtype(result, skipna=False) + if inferred_type == "boolean": + dtype = "bool" + elif inferred_type == "integer": + dtype = "int64" + elif inferred_type == "datetime64": + dtype = "datetime64[ns]" + elif inferred_type in ["timedelta", "timedelta64"]: + dtype = "timedelta64[ns]" + + # try to upcast here + elif inferred_type == "floating": + dtype = "int64" + if issubclass(result.dtype.type, np.number): + do_round = True + + else: + # TODO: complex? what if result is already non-object? + dtype = "object" + + dtype = np.dtype(dtype) + + if not isinstance(dtype, np.dtype): + # enforce our signature annotation + raise TypeError(dtype) # pragma: no cover + + converted = maybe_downcast_numeric(result, dtype, do_round) + if converted is not result: + return converted + + # a datetimelike + # GH12821, iNaT is cast to float + if dtype.kind in "mM" and result.dtype.kind in "if": + result = result.astype(dtype) + + elif dtype.kind == "m" and result.dtype == _dtype_obj: + # test_where_downcast_to_td64 + result = cast(np.ndarray, result) + result = array_to_timedelta64(result) + + elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj: + result = cast(np.ndarray, result) + return np.asarray(maybe_cast_to_datetime(result, dtype=dtype)) + + return result + + +@overload +def maybe_downcast_numeric( + result: np.ndarray, dtype: np.dtype, do_round: bool = False +) -> np.ndarray: + ... + + +@overload +def maybe_downcast_numeric( + result: ExtensionArray, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + ... + + +def maybe_downcast_numeric( + result: ArrayLike, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + """ + Subset of maybe_downcast_to_dtype restricted to numeric dtypes. + + Parameters + ---------- + result : ndarray or ExtensionArray + dtype : np.dtype or ExtensionDtype + do_round : bool + + Returns + ------- + ndarray or ExtensionArray + """ + if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype): + # e.g. SparseDtype has no itemsize attr + return result + + def trans(x): + if do_round: + return x.round() + return x + + if dtype.kind == result.dtype.kind: + # don't allow upcasts here (except if empty) + if result.dtype.itemsize <= dtype.itemsize and result.size: + return result + + if dtype.kind in "biu": + if not result.size: + # if we don't have any elements, just astype it + return trans(result).astype(dtype) + + if isinstance(result, np.ndarray): + element = result.item(0) + else: + element = result.iloc[0] + if not isinstance(element, (np.integer, np.floating, int, float, bool)): + # a comparable, e.g. 
a Decimal may slip in here + return result + + if ( + issubclass(result.dtype.type, (np.object_, np.number)) + and notna(result).all() + ): + new_result = trans(result).astype(dtype) + if new_result.dtype.kind == "O" or result.dtype.kind == "O": + # np.allclose may raise TypeError on object-dtype + if (new_result == result).all(): + return new_result + else: + if np.allclose(new_result, result, rtol=0): + return new_result + + elif ( + issubclass(dtype.type, np.floating) + and result.dtype.kind != "b" + and not is_string_dtype(result.dtype) + ): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "overflow encountered in cast", RuntimeWarning + ) + new_result = result.astype(dtype) + + # Adjust tolerances based on floating point size + size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16} + + atol = size_tols.get(new_result.dtype.itemsize, 0.0) + + # Check downcast float values are still equal within 7 digits when + # converting from float64 to float32 + if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol): + return new_result + + elif dtype.kind == result.dtype.kind == "c": + new_result = result.astype(dtype) + + if np.array_equal(new_result, result, equal_nan=True): + # TODO: use tolerance like we do for float? + return new_result + + return result + + +def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT: + """ + If array is a int/uint/float bit size lower than 64 bit, upcast it to 64 bit. + + Parameters + ---------- + arr : ndarray or ExtensionArray + + Returns + ------- + ndarray or ExtensionArray + """ + dtype = arr.dtype + if dtype.kind == "i" and dtype != np.int64: + return arr.astype(np.int64) + elif dtype.kind == "u" and dtype != np.uint64: + return arr.astype(np.uint64) + elif dtype.kind == "f" and dtype != np.float64: + return arr.astype(np.float64) + else: + return arr + + +def maybe_cast_pointwise_result( + result: ArrayLike, + dtype: DtypeObj, + numeric_only: bool = False, + same_dtype: bool = True, +) -> ArrayLike: + """ + Try casting result of a pointwise operation back to the original dtype if + appropriate. + + Parameters + ---------- + result : array-like + Result to cast. + dtype : np.dtype or ExtensionDtype + Input Series from which result was calculated. + numeric_only : bool, default False + Whether to cast only numerics or datetimes as well. + same_dtype : bool, default True + Specify dtype when calling _from_sequence + + Returns + ------- + result : array-like + result maybe casted to the dtype. + """ + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + if same_dtype: + result = _maybe_cast_to_extension_array(cls, result, dtype=dtype) + else: + result = _maybe_cast_to_extension_array(cls, result) + + elif (numeric_only and dtype.kind in "iufcb") or not numeric_only: + result = maybe_downcast_to_dtype(result, dtype) + + return result + + +def _maybe_cast_to_extension_array( + cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None +) -> ArrayLike: + """ + Call to `_from_sequence` that returns the object unchanged on Exception. 
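# Illustrative sketch (a standalone analogue, not the pandas helper itself):
# the core rule of maybe_downcast_numeric is to accept the narrower dtype only
# when the round trip is lossless.
import numpy as np

def downcast_if_lossless(values: np.ndarray, dtype: np.dtype) -> np.ndarray:
    candidate = values.astype(dtype)
    # keep the downcast result only if it equals the input exactly
    # (NaNs compare equal to NaNs)
    if np.allclose(candidate, values, rtol=0.0, atol=0.0, equal_nan=True):
        return candidate
    return values

print(downcast_if_lossless(np.array([1.0, 2.0]), np.dtype("int64")).dtype)  # int64
print(downcast_if_lossless(np.array([1.5, 2.0]), np.dtype("int64")).dtype)  # float64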
+ + Parameters + ---------- + cls : class, subclass of ExtensionArray + obj : arraylike + Values to pass to cls._from_sequence + dtype : ExtensionDtype, optional + + Returns + ------- + ExtensionArray or obj + """ + result: ArrayLike + + if dtype is not None: + try: + result = cls._from_scalars(obj, dtype=dtype) + except (TypeError, ValueError): + return obj + return result + + try: + result = cls._from_sequence(obj, dtype=dtype) + except Exception: + # We can't predict what downstream EA constructors may raise + result = obj + return result + + +@overload +def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: + ... + + +@overload +def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: + ... + + +def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: + """ + If we have a dtype that cannot hold NA values, find the best match that can. + """ + if isinstance(dtype, ExtensionDtype): + if dtype._can_hold_na: + return dtype + elif isinstance(dtype, IntervalDtype): + # TODO(GH#45349): don't special-case IntervalDtype, allow + # overriding instead of returning object below. + return IntervalDtype(np.float64, closed=dtype.closed) + return _dtype_obj + elif dtype.kind == "b": + return _dtype_obj + elif dtype.kind in "iu": + return np.dtype(np.float64) + return dtype + + +_canonical_nans = { + np.datetime64: np.datetime64("NaT", "ns"), + np.timedelta64: np.timedelta64("NaT", "ns"), + type(np.nan): np.nan, +} + + +def maybe_promote(dtype: np.dtype, fill_value=np.nan): + """ + Find the minimal dtype that can hold both the given dtype and fill_value. + + Parameters + ---------- + dtype : np.dtype + fill_value : scalar, default np.nan + + Returns + ------- + dtype + Upcasted from dtype argument if necessary. + fill_value + Upcasted from fill_value argument if necessary. + + Raises + ------ + ValueError + If fill_value is a non-scalar and dtype is not object. + """ + orig = fill_value + orig_is_nat = False + if checknull(fill_value): + # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740 + # avoid cache misses with NaN/NaT values that are not singletons + if fill_value is not NA: + try: + orig_is_nat = np.isnat(fill_value) + except TypeError: + pass + + fill_value = _canonical_nans.get(type(fill_value), fill_value) + + # for performance, we are using a cached version of the actual implementation + # of the function in _maybe_promote. 
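# Illustrative example (not part of the diffed file): promotion as it shows up
# through the public API. Introducing a missing value into an int64 Series
# requires a dtype that can hold NaN, so the result is promoted to float64.
import pandas as pd

s = pd.Series([1, 2, 3], dtype="int64")
r = s.reindex([0, 1, 2, 3])   # label 3 does not exist, so it is filled with NaN
print(r.dtype)                # float64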
However, this doesn't always work (in case + # of non-hashable arguments), so we fallback to the actual implementation if needed + try: + # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type + # "Type[Any]"; expected "Hashable" [arg-type] + dtype, fill_value = _maybe_promote_cached( + dtype, fill_value, type(fill_value) # type: ignore[arg-type] + ) + except TypeError: + # if fill_value is not hashable (required for caching) + dtype, fill_value = _maybe_promote(dtype, fill_value) + + if (dtype == _dtype_obj and orig is not None) or ( + orig_is_nat and np.datetime_data(orig)[0] != "ns" + ): + # GH#51592,53497 restore our potentially non-canonical fill_value + fill_value = orig + return dtype, fill_value + + +@functools.lru_cache +def _maybe_promote_cached(dtype, fill_value, fill_value_type): + # The cached version of _maybe_promote below + # This also use fill_value_type as (unused) argument to use this in the + # cache lookup -> to differentiate 1 and True + return _maybe_promote(dtype, fill_value) + + +def _maybe_promote(dtype: np.dtype, fill_value=np.nan): + # The actual implementation of the function, use `maybe_promote` above for + # a cached version. + if not is_scalar(fill_value): + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + if dtype != object: + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + raise ValueError("fill_value must be a scalar") + dtype = _dtype_obj + return dtype, fill_value + + if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM": + dtype = ensure_dtype_can_hold_na(dtype) + fv = na_value_for_dtype(dtype) + return dtype, fv + + elif isinstance(dtype, CategoricalDtype): + if fill_value in dtype.categories or isna(fill_value): + return dtype, fill_value + else: + return object, ensure_object(fill_value) + + elif isna(fill_value): + dtype = _dtype_obj + if fill_value is None: + # but we retain e.g. pd.NA + fill_value = np.nan + return dtype, fill_value + + # returns tuple of (dtype, fill_value) + if issubclass(dtype.type, np.datetime64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray._from_sequence([], dtype="M8[ns]") + try: + fv = dta._validate_setitem_value(fill_value) + return dta.dtype, fv + except (ValueError, TypeError): + return _dtype_obj, fill_value + + elif issubclass(dtype.type, np.timedelta64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + elif inferred.kind == "m": + # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns] + # see if we can losslessly cast it to our dtype + unit = np.datetime_data(dtype)[0] + try: + td = Timedelta(fill_value).as_unit(unit, round_ok=False) + except OutOfBoundsTimedelta: + return _dtype_obj, fill_value + else: + return dtype, td.asm8 + + return _dtype_obj, fill_value + + elif is_float(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + dtype = np.dtype(np.float64) + + elif dtype.kind == "f": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. 
mst is np.float64 and dtype is np.float32 + dtype = mst + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif is_bool(fill_value): + if not issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif is_integer(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + if not np_can_cast_scalar(fill_value, dtype): # type: ignore[arg-type] + # upcast to prevent overflow + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + if dtype.kind == "f": + # Case where we disagree with numpy + dtype = np.dtype(np.object_) + + elif is_complex(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, (np.integer, np.floating)): + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. mst is np.complex128 and dtype is np.complex64 + dtype = mst + + else: + dtype = np.dtype(np.object_) + + # in case we have a string that looked like a number + if issubclass(dtype.type, (bytes, str)): + dtype = np.dtype(np.object_) + + fill_value = _ensure_dtype_type(fill_value, dtype) + return dtype, fill_value + + +def _ensure_dtype_type(value, dtype: np.dtype): + """ + Ensure that the given value is an instance of the given dtype. + + e.g. if out dtype is np.complex64_, we should have an instance of that + as opposed to a python complex object. + + Parameters + ---------- + value : object + dtype : np.dtype + + Returns + ------- + object + """ + # Start with exceptions in which we do _not_ cast to numpy types + + if dtype == _dtype_obj: + return value + + # Note: before we get here we have already excluded isna(value) + return dtype.type(value) + + +def infer_dtype_from(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar or array. + + Parameters + ---------- + val : object + """ + if not is_list_like(val): + return infer_dtype_from_scalar(val) + return infer_dtype_from_array(val) + + +def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar. + + Parameters + ---------- + val : object + """ + dtype: DtypeObj = _dtype_obj + + # a 1-element ndarray + if isinstance(val, np.ndarray): + if val.ndim != 0: + msg = "invalid ndarray passed to infer_dtype_from_scalar" + raise ValueError(msg) + + dtype = val.dtype + val = lib.item_from_zerodim(val) + + elif isinstance(val, str): + # If we create an empty array using a string to infer + # the dtype, NumPy will only allocate one character per entry + # so this is kind of bad. Alternately we could use np.repeat + # instead of np.empty (but then you still don't want things + # coming out as np.str_! 
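# Illustrative example (not part of the diffed file): scalar dtype inference as
# seen from the public constructors. Resolutions shown assume current default
# behaviour and may differ across pandas versions.
import datetime as dt
import pandas as pd

print(pd.Series(["x"]).dtype)                      # object under default string handling
print(pd.Series([dt.datetime(2024, 1, 1)]).dtype)  # datetime64[ns]
print(pd.Series([dt.timedelta(days=1)]).dtype)     # timedelta64[ns]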
+ + dtype = _dtype_obj + if using_pyarrow_string_dtype(): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype(storage="pyarrow_numpy") + + elif isinstance(val, (np.datetime64, dt.datetime)): + try: + val = Timestamp(val) + except OutOfBoundsDatetime: + return _dtype_obj, val + + if val is NaT or val.tz is None: + val = val.to_datetime64() + dtype = val.dtype + # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes + else: + dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz) + + elif isinstance(val, (np.timedelta64, dt.timedelta)): + try: + val = Timedelta(val) + except (OutOfBoundsTimedelta, OverflowError): + dtype = _dtype_obj + else: + if val is NaT: + val = np.timedelta64("NaT", "ns") + else: + val = val.asm8 + dtype = val.dtype + + elif is_bool(val): + dtype = np.dtype(np.bool_) + + elif is_integer(val): + if isinstance(val, np.integer): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.int64) + + try: + np.array(val, dtype=dtype) + except OverflowError: + dtype = np.array(val).dtype + + elif is_float(val): + if isinstance(val, np.floating): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.float64) + + elif is_complex(val): + dtype = np.dtype(np.complex128) + + if isinstance(val, Period): + dtype = PeriodDtype(freq=val.freq) + elif isinstance(val, Interval): + subtype = infer_dtype_from_scalar(val.left)[0] + dtype = IntervalDtype(subtype=subtype, closed=val.closed) + + return dtype, val + + +def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: + """ + Convert datetimelike-keyed dicts to a Timestamp-keyed dict. + + Parameters + ---------- + d: dict-like object + + Returns + ------- + dict + """ + return {maybe_box_datetimelike(key): value for key, value in d.items()} + + +def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]: + """ + Infer the dtype from an array. + + Parameters + ---------- + arr : array + + Returns + ------- + tuple (pandas-compat dtype, array) + + + Examples + -------- + >>> np.asarray([1, '1']) + array(['1', '1'], dtype='>> infer_dtype_from_array([1, '1']) + (dtype('O'), [1, '1']) + """ + if isinstance(arr, np.ndarray): + return arr.dtype, arr + + if not is_list_like(arr): + raise TypeError("'arr' must be list-like") + + arr_dtype = getattr(arr, "dtype", None) + if isinstance(arr_dtype, ExtensionDtype): + return arr.dtype, arr + + elif isinstance(arr, ABCSeries): + return arr.dtype, np.asarray(arr) + + # don't force numpy coerce with nan's + inferred = lib.infer_dtype(arr, skipna=False) + if inferred in ["string", "bytes", "mixed", "mixed-integer"]: + return (np.dtype(np.object_), arr) + + arr = np.asarray(arr) + return arr.dtype, arr + + +def _maybe_infer_dtype_type(element): + """ + Try to infer an object's dtype, for use in arithmetic ops. + + Uses `element.dtype` if that's available. + Objects implementing the iterator protocol are cast to a NumPy array, + and from there the array's type is used. + + Parameters + ---------- + element : object + Possibly has a `.dtype` attribute, and possibly the iterator + protocol. 
+ + Returns + ------- + tipo : type + + Examples + -------- + >>> from collections import namedtuple + >>> Foo = namedtuple("Foo", "dtype") + >>> _maybe_infer_dtype_type(Foo(np.dtype("i8"))) + dtype('int64') + """ + tipo = None + if hasattr(element, "dtype"): + tipo = element.dtype + elif is_list_like(element): + element = np.asarray(element) + tipo = element.dtype + return tipo + + +def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None: + """ + Change string like dtypes to object for + ``DataFrame.select_dtypes()``. + """ + # error: Argument 1 to has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + # error: Argument 2 to has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + non_string_dtypes = dtype_set - { + np.dtype("S").type, # type: ignore[arg-type] + np.dtype(" np.ndarray: + """coerce the indexer input array to the smallest dtype possible""" + length = len(categories) + if length < _int8_max: + return ensure_int8(indexer) + elif length < _int16_max: + return ensure_int16(indexer) + elif length < _int32_max: + return ensure_int32(indexer) + return ensure_int64(indexer) + + +def convert_dtypes( + input_array: ArrayLike, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + infer_objects: bool = False, + dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable", +) -> DtypeObj: + """ + Convert objects to best possible type, and optionally, + to types supporting ``pd.NA``. + + Parameters + ---------- + input_array : ExtensionArray or np.ndarray + convert_string : bool, default True + Whether object dtypes should be converted to ``StringDtype()``. + convert_integer : bool, default True + Whether, if possible, conversion can be done to integer extension types. + convert_boolean : bool, defaults True + Whether object dtypes should be converted to ``BooleanDtypes()``. + convert_floating : bool, defaults True + Whether, if possible, conversion can be done to floating extension types. + If `convert_integer` is also True, preference will be give to integer + dtypes if the floats can be faithfully casted to integers. + infer_objects : bool, defaults False + Whether to also infer objects to float/int if possible. Is only hit if the + object array contains pd.NA. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. 
versionadded:: 2.0 + + Returns + ------- + np.dtype, or ExtensionDtype + """ + inferred_dtype: str | DtypeObj + + if ( + convert_string or convert_integer or convert_boolean or convert_floating + ) and isinstance(input_array, np.ndarray): + if input_array.dtype == object: + inferred_dtype = lib.infer_dtype(input_array) + else: + inferred_dtype = input_array.dtype + + if is_string_dtype(inferred_dtype): + if not convert_string or inferred_dtype == "bytes": + inferred_dtype = input_array.dtype + else: + inferred_dtype = pandas_dtype_func("string") + + if convert_integer: + target_int_dtype = pandas_dtype_func("Int64") + + if input_array.dtype.kind in "iu": + from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + inferred_dtype = NUMPY_INT_TO_DTYPE.get( + input_array.dtype, target_int_dtype + ) + elif input_array.dtype.kind in "fcb": + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = target_int_dtype + else: + inferred_dtype = input_array.dtype + elif ( + infer_objects + and input_array.dtype == object + and (isinstance(inferred_dtype, str) and inferred_dtype == "integer") + ): + inferred_dtype = target_int_dtype + + if convert_floating: + if input_array.dtype.kind in "fcb": + # i.e. numeric but not integer + from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE + + inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get( + input_array.dtype, pandas_dtype_func("Float64") + ) + # if we could also convert to integer, check if all floats + # are actually integers + if convert_integer: + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = pandas_dtype_func("Int64") + else: + inferred_dtype = inferred_float_dtype + else: + inferred_dtype = inferred_float_dtype + elif ( + infer_objects + and input_array.dtype == object + and ( + isinstance(inferred_dtype, str) + and inferred_dtype == "mixed-integer-float" + ) + ): + inferred_dtype = pandas_dtype_func("Float64") + + if convert_boolean: + if input_array.dtype.kind == "b": + inferred_dtype = pandas_dtype_func("boolean") + elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean": + inferred_dtype = pandas_dtype_func("boolean") + + if isinstance(inferred_dtype, str): + # If we couldn't do anything else, then we retain the dtype + inferred_dtype = input_array.dtype + + else: + inferred_dtype = input_array.dtype + + if dtype_backend == "pyarrow": + from pandas.core.arrays.arrow.array import to_pyarrow_type + from pandas.core.arrays.string_ import StringDtype + + assert not isinstance(inferred_dtype, str) + + if ( + (convert_integer and inferred_dtype.kind in "iu") + or (convert_floating and inferred_dtype.kind in "fc") + or (convert_boolean and inferred_dtype.kind == "b") + or (convert_string and isinstance(inferred_dtype, StringDtype)) + or ( + inferred_dtype.kind not in "iufcb" + and not isinstance(inferred_dtype, StringDtype) + ) + ): + if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance( + inferred_dtype, DatetimeTZDtype + ): + base_dtype = inferred_dtype.base + elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)): + base_dtype = inferred_dtype.numpy_dtype + elif isinstance(inferred_dtype, StringDtype): + base_dtype = np.dtype(str) + else: + base_dtype = inferred_dtype + if ( + base_dtype.kind == "O" # type: ignore[union-attr] + and input_array.size > 0 + and isna(input_array).all() + ): + import pyarrow as pa 
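# Illustrative example (not part of the diffed file): the public entry point
# backed by this helper. Nullable extension dtypes are chosen where the values
# allow it; dtype_backend="pyarrow" additionally requires pyarrow.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, None], "b": ["x", "y", None], "c": [True, False, None]})
print(df.convert_dtypes().dtypes)   # a: Int64, b: string, c: boolean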
+ + pa_type = pa.null() + else: + pa_type = to_pyarrow_type(base_dtype) + if pa_type is not None: + inferred_dtype = ArrowDtype(pa_type) + elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype): + # GH 53648 + inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype] + + # error: Incompatible return value type (got "Union[str, Union[dtype[Any], + # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]") + return inferred_dtype # type: ignore[return-value] + + +def maybe_infer_to_datetimelike( + value: npt.NDArray[np.object_], +) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray: + """ + we might have a array (or single object) that is datetime like, + and no dtype is passed don't change the value unless we find a + datetime/timedelta set + + this is pretty strict in that a datetime/timedelta is REQUIRED + in addition to possible nulls/string likes + + Parameters + ---------- + value : np.ndarray[object] + + Returns + ------- + np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray + + """ + if not isinstance(value, np.ndarray) or value.dtype != object: + # Caller is responsible for passing only ndarray[object] + raise TypeError(type(value)) # pragma: no cover + if value.ndim != 1: + # Caller is responsible + raise ValueError(value.ndim) # pragma: no cover + + if not len(value): + return value + + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray, + # TimedeltaArray, PeriodArray, IntervalArray]") + return lib.maybe_convert_objects( # type: ignore[return-value] + value, + # Here we do not convert numeric dtypes, as if we wanted that, + # numpy would have done it for us. + convert_numeric=False, + convert_non_numeric=True, + dtype_if_all_nat=np.dtype("M8[ns]"), + ) + + +def maybe_cast_to_datetime( + value: np.ndarray | list, dtype: np.dtype +) -> ExtensionArray | np.ndarray: + """ + try to cast the array/value to a datetimelike dtype, converting float + nan to iNaT + + Caller is responsible for handling ExtensionDtype cases and non dt64/td64 + cases. + """ + from pandas.core.arrays.datetimes import DatetimeArray + from pandas.core.arrays.timedeltas import TimedeltaArray + + assert dtype.kind in "mM" + if not is_list_like(value): + raise TypeError("value must be listlike") + + # TODO: _from_sequence would raise ValueError in cases where + # _ensure_nanosecond_dtype raises TypeError + _ensure_nanosecond_dtype(dtype) + + if lib.is_np_dtype(dtype, "m"): + res = TimedeltaArray._from_sequence(value, dtype=dtype) + return res + else: + try: + dta = DatetimeArray._from_sequence(value, dtype=dtype) + except ValueError as err: + # We can give a Series-specific exception message. + if "cannot supply both a tz and a timezone-naive dtype" in str(err): + raise ValueError( + "Cannot convert timezone-aware data to " + "timezone-naive dtype. Use " + "pd.Series(values).dt.tz_localize(None) instead." + ) from err + raise + + return dta + + +def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None: + """ + Convert dtypes with granularity less than nanosecond to nanosecond + + >>> _ensure_nanosecond_dtype(np.dtype("M8[us]")) + + >>> _ensure_nanosecond_dtype(np.dtype("M8[D]")) + Traceback (most recent call last): + ... + TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + + >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]")) + Traceback (most recent call last): + ... 
+ TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + """ # noqa: E501 + msg = ( + f"The '{dtype.name}' dtype has no unit. " + f"Please pass in '{dtype.name}[ns]' instead." + ) + + # unpack e.g. SparseDtype + dtype = getattr(dtype, "subtype", dtype) + + if not isinstance(dtype, np.dtype): + # i.e. datetime64tz + pass + + elif dtype.kind in "mM": + if not is_supported_dtype(dtype): + # pre-2.0 we would silently swap in nanos for lower-resolutions, + # raise for above-nano resolutions + if dtype.name in ["datetime64", "timedelta64"]: + raise ValueError(msg) + # TODO: ValueError or TypeError? existing test + # test_constructor_generic_timestamp_bad_frequency expects TypeError + raise TypeError( + f"dtype={dtype} is not supported. Supported resolutions are 's', " + "'ms', 'us', and 'ns'" + ) + + +# TODO: other value-dependent functions to standardize here include +# Index._find_common_type_compat +def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj: + """ + Find the type/dtype for the result of an operation between objects. + + This is similar to find_common_type, but looks at the right object instead + of just its dtype. This can be useful in particular when the right + object does not have a `dtype`. + + Parameters + ---------- + left_dtype : np.dtype or ExtensionDtype + right : Any + + Returns + ------- + np.dtype or ExtensionDtype + + See also + -------- + find_common_type + numpy.result_type + """ + new_dtype: DtypeObj + + if ( + isinstance(left_dtype, np.dtype) + and left_dtype.kind in "iuc" + and (lib.is_integer(right) or lib.is_float(right)) + ): + # e.g. with int8 dtype and right=512, we want to end up with + # np.int16, whereas infer_dtype_from(512) gives np.int64, + # which will make us upcast too far. + if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f": + right = int(right) + # After NEP 50, numpy won't inspect Python scalars + # TODO: do we need to recreate numpy's inspection logic for floats too + # (this breaks some tests) + if isinstance(right, int) and not isinstance(right, np.integer): + # This gives an unsigned type by default + # (if our number is positive) + + # If our left dtype is signed, we might not want this since + # this might give us 1 dtype too big + # We should check if the corresponding int dtype (e.g. int64 for uint64) + # can hold the number + right_dtype = np.min_scalar_type(right) + if right == 0: + # Special case 0 + right = left_dtype + elif ( + not np.issubdtype(left_dtype, np.unsignedinteger) + and 0 < right <= np.iinfo(right_dtype).max + ): + # If left dtype isn't unsigned, check if it fits in the signed dtype + right = np.dtype(f"i{right_dtype.itemsize}") + else: + right = right_dtype + + new_dtype = np.result_type(left_dtype, right) + + elif is_valid_na_for_dtype(right, left_dtype): + # e.g. IntervalDtype[int] and None/np.nan + new_dtype = ensure_dtype_can_hold_na(left_dtype) + + else: + dtype, _ = infer_dtype_from(right) + new_dtype = find_common_type([left_dtype, dtype]) + + return new_dtype + + +def common_dtype_categorical_compat( + objs: Sequence[Index | ArrayLike], dtype: DtypeObj +) -> DtypeObj: + """ + Update the result of find_common_type to account for NAs in a Categorical. 
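# Illustrative example (not part of the diffed file): the common-dtype rules
# above as they surface through pd.concat. Mixing bool with a numeric dtype
# falls back to object rather than casting bool to int.
import pandas as pd

print(pd.concat([pd.Series([1], dtype="int64"),
                 pd.Series([1.5], dtype="float64")]).dtype)   # float64
print(pd.concat([pd.Series([True]),
                 pd.Series([1], dtype="int64")]).dtype)       # object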
+ + Parameters + ---------- + objs : list[np.ndarray | ExtensionArray | Index] + dtype : np.dtype or ExtensionDtype + + Returns + ------- + np.dtype or ExtensionDtype + """ + # GH#38240 + + # TODO: more generally, could do `not can_hold_na(dtype)` + if lib.is_np_dtype(dtype, "iu"): + for obj in objs: + # We don't want to accientally allow e.g. "categorical" str here + obj_dtype = getattr(obj, "dtype", None) + if isinstance(obj_dtype, CategoricalDtype): + if isinstance(obj, ABCIndex): + # This check may already be cached + hasnas = obj.hasnans + else: + # Categorical + hasnas = cast("Categorical", obj)._hasna + + if hasnas: + # see test_union_int_categorical_with_nan + dtype = np.dtype(np.float64) + break + return dtype + + +def np_find_common_type(*dtypes: np.dtype) -> np.dtype: + """ + np.find_common_type implementation pre-1.25 deprecation using np.result_type + https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065 + + Parameters + ---------- + dtypes : np.dtypes + + Returns + ------- + np.dtype + """ + try: + common_dtype = np.result_type(*dtypes) + if common_dtype.kind in "mMSU": + # NumPy promotion currently (1.25) misbehaves for for times and strings, + # so fall back to object (find_common_dtype did unless there + # was only one dtype) + common_dtype = np.dtype("O") + + except TypeError: + common_dtype = np.dtype("O") + return common_dtype + + +@overload +def find_common_type(types: list[np.dtype]) -> np.dtype: + ... + + +@overload +def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: + ... + + +@overload +def find_common_type(types: list[DtypeObj]) -> DtypeObj: + ... + + +def find_common_type(types): + """ + Find a common data type among the given dtypes. + + Parameters + ---------- + types : list of dtypes + + Returns + ------- + pandas extension or numpy dtype + + See Also + -------- + numpy.find_common_type + + """ + if not types: + raise ValueError("no types given") + + first = types[0] + + # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) + # => object + if lib.dtypes_all_equal(list(types)): + return first + + # get unique types (dict.fromkeys is used as order-preserving set()) + types = list(dict.fromkeys(types).keys()) + + if any(isinstance(t, ExtensionDtype) for t in types): + for t in types: + if isinstance(t, ExtensionDtype): + res = t._get_common_dtype(types) + if res is not None: + return res + return np.dtype("object") + + # take lowest unit + if all(lib.is_np_dtype(t, "M") for t in types): + return np.dtype(max(types)) + if all(lib.is_np_dtype(t, "m") for t in types): + return np.dtype(max(types)) + + # don't mix bool / int or float or complex + # this is different from numpy, which casts bool with float/int as int + has_bools = any(t.kind == "b" for t in types) + if has_bools: + for t in types: + if t.kind in "iufc": + return np.dtype("object") + + return np_find_common_type(*types) + + +def construct_2d_arraylike_from_scalar( + value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool +) -> np.ndarray: + shape = (length, width) + + if dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + elif dtype == _dtype_obj: + if isinstance(value, (np.timedelta64, np.datetime64)): + # calling np.array below would cast to pytimedelta/pydatetime + out = np.empty(shape, dtype=object) + out.fill(value) + return out + + # Attempt to coerce to a numpy array + try: + if not copy: + arr = np.asarray(value, dtype=dtype) + else: + arr = np.array(value, dtype=dtype, copy=copy) + except (ValueError, 
TypeError) as err: + raise TypeError( + f"DataFrame constructor called with incompatible data and dtype: {err}" + ) from err + + if arr.ndim != 0: + raise ValueError("DataFrame constructor not properly called!") + + return np.full(shape, arr) + + +def construct_1d_arraylike_from_scalar( + value: Scalar, length: int, dtype: DtypeObj | None +) -> ArrayLike: + """ + create a np.ndarray / pandas type of specified shape and dtype + filled with values + + Parameters + ---------- + value : scalar value + length : int + dtype : pandas_dtype or np.dtype + + Returns + ------- + np.ndarray / pandas type of length, filled with value + + """ + + if dtype is None: + try: + dtype, value = infer_dtype_from_scalar(value) + except OutOfBoundsDatetime: + dtype = _dtype_obj + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + seq = [] if length == 0 else [value] + subarr = cls._from_sequence(seq, dtype=dtype).repeat(length) + + else: + if length and dtype.kind in "iu" and isna(value): + # coerce if we have nan for an integer dtype + dtype = np.dtype("float64") + elif lib.is_np_dtype(dtype, "US"): + # we need to coerce to object dtype to avoid + # to allow numpy to take our string as a scalar value + dtype = np.dtype("object") + if not isna(value): + value = ensure_str(value) + elif dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + + subarr = np.empty(length, dtype=dtype) + if length: + # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes + subarr.fill(value) + + return subarr + + +def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj): + # Caller is responsible for checking dtype.kind in "mM" + + if isinstance(value, dt.datetime): + # we dont want to box dt64, in particular datetime64("NaT") + value = maybe_box_datetimelike(value, dtype) + + return _maybe_unbox_datetimelike(value, dtype) + + +def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray: + """ + Transform any list-like object in a 1-dimensional numpy array of object + dtype. + + Parameters + ---------- + values : any iterable which has a len() + + Raises + ------ + TypeError + * If `values` does not have a len() + + Returns + ------- + 1-dimensional numpy array of dtype object + """ + # numpy will try to interpret nested lists as further dimensions, hence + # making a 1D array that contains list-likes is a bit tricky: + result = np.empty(len(values), dtype="object") + result[:] = values + return result + + +def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray: + """ + Takes any dtype and returns the casted version, raising for when data is + incompatible with integer/unsigned integer dtypes. + + Parameters + ---------- + arr : np.ndarray or list + The array to cast. + dtype : np.dtype + The integer dtype to cast the array to. + + Returns + ------- + ndarray + Array of integer or unsigned integer dtype. + + Raises + ------ + OverflowError : the dtype is incompatible with the data + ValueError : loss of precision has occurred during casting + + Examples + -------- + If you try to coerce negative values to unsigned integers, it raises: + + >>> pd.Series([-1], dtype="uint64") + Traceback (most recent call last): + ... + OverflowError: Trying to coerce negative values to unsigned integers + + Also, if you try to coerce float values to integers, it raises: + + >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64")) + Traceback (most recent call last): + ... 
+ ValueError: Trying to coerce float values to integers + """ + assert dtype.kind in "iu" + + try: + if not isinstance(arr, np.ndarray): + with warnings.catch_warnings(): + # We already disallow dtype=uint w/ negative numbers + # (test_constructor_coercion_signed_to_unsigned) so safe to ignore. + if not np_version_gt2: + warnings.filterwarnings( + "ignore", + "NumPy will stop allowing conversion of " + "out-of-bound Python int", + DeprecationWarning, + ) + casted = np.asarray(arr, dtype=dtype) + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + casted = arr.astype(dtype, copy=False) + except OverflowError as err: + raise OverflowError( + "The elements provided in the data cannot all be " + f"casted to the dtype {dtype}" + ) from err + + if isinstance(arr, np.ndarray) and arr.dtype == dtype: + # avoid expensive array_equal check + return casted + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + warnings.filterwarnings( + "ignore", "elementwise comparison failed", FutureWarning + ) + if np.array_equal(arr, casted): + return casted + + # We do this casting to allow for proper + # data and dtype checking. + # + # We didn't do this earlier because NumPy + # doesn't handle `uint64` correctly. + arr = np.asarray(arr) + + if np.issubdtype(arr.dtype, str): + # TODO(numpy-2.0 min): This case will raise an OverflowError above + if (casted.astype(str) == arr).all(): + return casted + raise ValueError(f"string values cannot be losslessly cast to {dtype}") + + if dtype.kind == "u" and (arr < 0).any(): + # TODO: can this be hit anymore after numpy 2.0? + raise OverflowError("Trying to coerce negative values to unsigned integers") + + if arr.dtype.kind == "f": + if not np.isfinite(arr).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + raise ValueError("Trying to coerce float values to integers") + if arr.dtype == object: + raise ValueError("Trying to coerce float values to integers") + + if casted.dtype < arr.dtype: + # TODO: Can this path be hit anymore with numpy > 2 + # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows + raise ValueError( + f"Values are too large to be losslessly converted to {dtype}. " + f"To cast anyway, use pd.Series(values).astype({dtype})" + ) + + if arr.dtype.kind in "mM": + # test_constructor_maskedarray_nonfloat + raise TypeError( + f"Constructing a Series or DataFrame from {arr.dtype} values and " + f"dtype={dtype} is not supported. Use values.view({dtype}) instead." + ) + + # No known cases that get here, but raising explicitly to cover our bases. + raise ValueError(f"values cannot be losslessly cast to {dtype}") + + +def can_hold_element(arr: ArrayLike, element: Any) -> bool: + """ + Can we do an inplace setitem with this element in an array with this dtype? 
+ + Parameters + ---------- + arr : np.ndarray or ExtensionArray + element : Any + + Returns + ------- + bool + """ + dtype = arr.dtype + if not isinstance(dtype, np.dtype) or dtype.kind in "mM": + if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)): + # np.dtype here catches datetime64ns and timedelta64ns; we assume + # in this case that we have DatetimeArray/TimedeltaArray + arr = cast( + "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr + ) + try: + arr._validate_setitem_value(element) + return True + except (ValueError, TypeError): + return False + + # This is technically incorrect, but maintains the behavior of + # ExtensionBlock._can_hold_element + return True + + try: + np_can_hold_element(dtype, element) + return True + except (TypeError, LossySetitemError): + return False + + +def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: + """ + Raise if we cannot losslessly set this element into an ndarray with this dtype. + + Specifically about places where we disagree with numpy. i.e. there are + cases where numpy will raise in doing the setitem that we do not check + for here, e.g. setting str "X" into a numeric ndarray. + + Returns + ------- + Any + The element, potentially cast to the dtype. + + Raises + ------ + ValueError : If we cannot losslessly store this element with this dtype. + """ + if dtype == _dtype_obj: + return element + + tipo = _maybe_infer_dtype_type(element) + + if dtype.kind in "iu": + if isinstance(element, range): + if _dtype_can_hold_range(element, dtype): + return element + raise LossySetitemError + + if is_integer(element) or (is_float(element) and element.is_integer()): + # e.g. test_setitem_series_int8 if we have a python int 1 + # tipo may be np.int32, despite the fact that it will fit + # in smaller int dtypes. + info = np.iinfo(dtype) + if info.min <= element <= info.max: + return dtype.type(element) + raise LossySetitemError + + if tipo is not None: + if tipo.kind not in "iu": + if isinstance(element, np.ndarray) and element.dtype.kind == "f": + # If all can be losslessly cast to integers, then we can hold them + with np.errstate(invalid="ignore"): + # We check afterwards if cast was losslessly, so no need to show + # the warning + casted = element.astype(dtype) + comp = casted == element + if comp.all(): + # Return the casted values bc they can be passed to + # np.putmask, whereas the raw values cannot. + # see TestSetitemFloatNDarrayIntoIntegerSeries + return casted + raise LossySetitemError + + elif isinstance(element, ABCExtensionArray) and isinstance( + element.dtype, CategoricalDtype + ): + # GH#52927 setting Categorical value into non-EA frame + # TODO: general-case for EAs? + try: + casted = element.astype(dtype) + except (ValueError, TypeError): + raise LossySetitemError + # Check for cases of either + # a) lossy overflow/rounding or + # b) semantic changes like dt64->int64 + comp = casted == element + if not comp.all(): + raise LossySetitemError + return casted + + # Anything other than integer we cannot hold + raise LossySetitemError + if ( + dtype.kind == "u" + and isinstance(element, np.ndarray) + and element.dtype.kind == "i" + ): + # see test_where_uint64 + casted = element.astype(dtype) + if (casted == element).all(): + # TODO: faster to check (element >=0).all()? potential + # itemsize issues there? + return casted + raise LossySetitemError + if dtype.itemsize < tipo.itemsize: + raise LossySetitemError + if not isinstance(tipo, np.dtype): + # i.e. 
nullable IntegerDtype; we can put this into an ndarray + # losslessly iff it has no NAs + arr = element._values if isinstance(element, ABCSeries) else element + if arr._hasna: + raise LossySetitemError + return element + + return element + + raise LossySetitemError + + if dtype.kind == "f": + if lib.is_integer(element) or lib.is_float(element): + casted = dtype.type(element) + if np.isnan(casted) or casted == element: + return casted + # otherwise e.g. overflow see TestCoercionFloat32 + raise LossySetitemError + + if tipo is not None: + # TODO: itemsize check? + if tipo.kind not in "iuf": + # Anything other than float/integer we cannot hold + raise LossySetitemError + if not isinstance(tipo, np.dtype): + # i.e. nullable IntegerDtype or FloatingDtype; + # we can put this into an ndarray losslessly iff it has no NAs + if element._hasna: + raise LossySetitemError + return element + elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind: + if isinstance(element, np.ndarray): + # e.g. TestDataFrameIndexingWhere::test_where_alignment + casted = element.astype(dtype) + if np.array_equal(casted, element, equal_nan=True): + return casted + raise LossySetitemError + + return element + + raise LossySetitemError + + if dtype.kind == "c": + if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element): + if np.isnan(element): + # see test_where_complex GH#6345 + return dtype.type(element) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + casted = dtype.type(element) + if casted == element: + return casted + # otherwise e.g. overflow see test_32878_complex_itemsize + raise LossySetitemError + + if tipo is not None: + if tipo.kind in "iufc": + return element + raise LossySetitemError + raise LossySetitemError + + if dtype.kind == "b": + if tipo is not None: + if tipo.kind == "b": + if not isinstance(tipo, np.dtype): + # i.e. we have a BooleanArray + if element._hasna: + # i.e. there are pd.NA elements + raise LossySetitemError + return element + raise LossySetitemError + if lib.is_bool(element): + return element + raise LossySetitemError + + if dtype.kind == "S": + # TODO: test tests.frame.methods.test_replace tests get here, + # need more targeted tests. xref phofl has a PR about this + if tipo is not None: + if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize: + return element + raise LossySetitemError + if isinstance(element, bytes) and len(element) <= dtype.itemsize: + return element + raise LossySetitemError + + if dtype.kind == "V": + # i.e. np.void, which cannot hold _anything_ + raise LossySetitemError + + raise NotImplementedError(dtype) + + +def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool: + """ + _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints), + but in many cases a range can be held by a smaller integer dtype. + Check if this is one of those cases. 
+ """ + if not len(rng): + return True + return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype) + + +def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool: + """ + np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar + inference + + Parameters + ---------- + element : Scalar + dtype : np.dtype + + Returns + ------- + bool + """ + try: + np_can_hold_element(dtype, element) + return True + except (LossySetitemError, NotImplementedError): + return False diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/missing.py b/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/missing.py new file mode 100644 index 0000000000000000000000000000000000000000..c341ff9dff7e613d8db2209efb5c10f170a9cd47 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/dtypes/missing.py @@ -0,0 +1,810 @@ +""" +missing types & inference +""" +from __future__ import annotations + +from decimal import Decimal +from functools import partial +from typing import ( + TYPE_CHECKING, + overload, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import lib +import pandas._libs.missing as libmissing +from pandas._libs.tslibs import ( + NaT, + iNaT, +) + +from pandas.core.dtypes.common import ( + DT64NS_DTYPE, + TD64NS_DTYPE, + ensure_object, + is_scalar, + is_string_or_object_np_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCExtensionArray, + ABCIndex, + ABCMultiIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import is_list_like + +if TYPE_CHECKING: + from re import Pattern + + from pandas._typing import ( + ArrayLike, + DtypeObj, + NDFrame, + NDFrameT, + Scalar, + npt, + ) + + from pandas import Series + from pandas.core.indexes.base import Index + + +isposinf_scalar = libmissing.isposinf_scalar +isneginf_scalar = libmissing.isneginf_scalar + +nan_checker = np.isnan +INF_AS_NA = False +_dtype_object = np.dtype("object") +_dtype_str = np.dtype(str) + + +@overload +def isna(obj: Scalar | Pattern) -> bool: + ... + + +@overload +def isna( + obj: ArrayLike | Index | list, +) -> npt.NDArray[np.bool_]: + ... + + +@overload +def isna(obj: NDFrameT) -> NDFrameT: + ... + + +# handle unions +@overload +def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: + ... + + +@overload +def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + ... + + +def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + """ + Detect missing values for an array-like object. + + This function takes a scalar or array-like object and indicates + whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` + in object arrays, ``NaT`` in datetimelike). + + Parameters + ---------- + obj : scalar or array-like + Object to check for null or missing values. + + Returns + ------- + bool or array-like of bool + For scalar input, returns a scalar boolean. + For array input, returns an array of boolean indicating whether each + corresponding element is missing. + + See Also + -------- + notna : Boolean inverse of pandas.isna. + Series.isna : Detect missing values in a Series. + DataFrame.isna : Detect missing values in a DataFrame. + Index.isna : Detect missing values in an Index. + + Examples + -------- + Scalar arguments (including strings) result in a scalar boolean. 
+ + >>> pd.isna('dog') + False + + >>> pd.isna(pd.NA) + True + + >>> pd.isna(np.nan) + True + + ndarrays result in an ndarray of booleans. + + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.isna(array) + array([[False, True, False], + [False, False, True]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... "2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.isna(index) + array([False, False, True, False]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.isna(df) + 0 1 2 + 0 False False False + 1 False True False + + >>> pd.isna(df[1]) + 0 False + 1 True + Name: 1, dtype: bool + """ + return _isna(obj) + + +isnull = isna + + +def _isna(obj, inf_as_na: bool = False): + """ + Detect missing values, treating None, NaN or NA as null. Infinite + values will also be treated as null if inf_as_na is True. + + Parameters + ---------- + obj: ndarray or object value + Input array or scalar value. + inf_as_na: bool + Whether to treat infinity as null. + + Returns + ------- + boolean ndarray or boolean + """ + if is_scalar(obj): + return libmissing.checknull(obj, inf_as_na=inf_as_na) + elif isinstance(obj, ABCMultiIndex): + raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False + elif isinstance(obj, (np.ndarray, ABCExtensionArray)): + return _isna_array(obj, inf_as_na=inf_as_na) + elif isinstance(obj, ABCIndex): + # Try to use cached isna, which also short-circuits for integer dtypes + # and avoids materializing RangeIndex._values + if not obj._can_hold_na: + return obj.isna() + return _isna_array(obj._values, inf_as_na=inf_as_na) + + elif isinstance(obj, ABCSeries): + result = _isna_array(obj._values, inf_as_na=inf_as_na) + # box + result = obj._constructor(result, index=obj.index, name=obj.name, copy=False) + return result + elif isinstance(obj, ABCDataFrame): + return obj.isna() + elif isinstance(obj, list): + return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na) + elif hasattr(obj, "__array__"): + return _isna_array(np.asarray(obj), inf_as_na=inf_as_na) + else: + return False + + +def _use_inf_as_na(key) -> None: + """ + Option change callback for na/inf behaviour. + + Choose which replacement for numpy.isnan / -numpy.isfinite is used. + + Parameters + ---------- + flag: bool + True means treat None, NaN, INF, -INF as null (old way), + False means None and NaN are null, but INF, -INF are not null + (new way). + + Notes + ----- + This approach to setting global module values is discussed and + approved here: + + * https://stackoverflow.com/questions/4859217/ + programmatically-creating-variables-in-python/4859312#4859312 + """ + inf_as_na = get_option(key) + globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na) + if inf_as_na: + globals()["nan_checker"] = lambda x: ~np.isfinite(x) + globals()["INF_AS_NA"] = True + else: + globals()["nan_checker"] = np.isnan + globals()["INF_AS_NA"] = False + + +def _isna_array(values: ArrayLike, inf_as_na: bool = False): + """ + Return an array indicating which values of the input array are NaN / NA. 
+ + Parameters + ---------- + obj: ndarray or ExtensionArray + The input array whose elements are to be checked. + inf_as_na: bool + Whether or not to treat infinite values as NA. + + Returns + ------- + array-like + Array of boolean values denoting the NA status of each element. + """ + dtype = values.dtype + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + if inf_as_na and isinstance(dtype, CategoricalDtype): + result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na) + else: + # error: Incompatible types in assignment (expression has type + # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has + # type "ndarray[Any, dtype[bool_]]") + result = values.isna() # type: ignore[assignment] + elif isinstance(values, np.rec.recarray): + # GH 48526 + result = _isna_recarray_dtype(values, inf_as_na=inf_as_na) + elif is_string_or_object_np_dtype(values.dtype): + result = _isna_string_dtype(values, inf_as_na=inf_as_na) + elif dtype.kind in "mM": + # this is the NaT pattern + result = values.view("i8") == iNaT + else: + if inf_as_na: + result = ~np.isfinite(values) + else: + result = np.isnan(values) + + return result + + +def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]: + # Working around NumPy ticket 1542 + dtype = values.dtype + + if dtype.kind in ("S", "U"): + result = np.zeros(values.shape, dtype=bool) + else: + if values.ndim in {1, 2}: + result = libmissing.isnaobj(values, inf_as_na=inf_as_na) + else: + # 0-D, reached via e.g. mask_missing + result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na) + result = result.reshape(values.shape) + + return result + + +def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_: + is_inf_in_record = np.zeros(len(record_as_array), dtype=bool) + for i, value in enumerate(record_as_array): + is_element_inf = False + try: + is_element_inf = np.isinf(value) + except TypeError: + is_element_inf = False + is_inf_in_record[i] = is_element_inf + + return np.any(is_inf_in_record) + + +def _isna_recarray_dtype( + values: np.rec.recarray, inf_as_na: bool +) -> npt.NDArray[np.bool_]: + result = np.zeros(values.shape, dtype=bool) + for i, record in enumerate(values): + record_as_array = np.array(record.tolist()) + does_record_contain_nan = isna_all(record_as_array) + does_record_contain_inf = False + if inf_as_na: + does_record_contain_inf = bool(_has_record_inf_value(record_as_array)) + result[i] = np.any( + np.logical_or(does_record_contain_nan, does_record_contain_inf) + ) + + return result + + +@overload +def notna(obj: Scalar) -> bool: + ... + + +@overload +def notna( + obj: ArrayLike | Index | list, +) -> npt.NDArray[np.bool_]: + ... + + +@overload +def notna(obj: NDFrameT) -> NDFrameT: + ... + + +# handle unions +@overload +def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: + ... + + +@overload +def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + ... + + +def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + """ + Detect non-missing values for an array-like object. + + This function takes a scalar or array-like object and indicates + whether values are valid (not missing, which is ``NaN`` in numeric + arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). + + Parameters + ---------- + obj : array-like or object value + Object to check for *not* null or *non*-missing values. + + Returns + ------- + bool or array-like of bool + For scalar input, returns a scalar boolean. 
+ For array input, returns an array of boolean indicating whether each + corresponding element is valid. + + See Also + -------- + isna : Boolean inverse of pandas.notna. + Series.notna : Detect valid values in a Series. + DataFrame.notna : Detect valid values in a DataFrame. + Index.notna : Detect valid values in an Index. + + Examples + -------- + Scalar arguments (including strings) result in a scalar boolean. + + >>> pd.notna('dog') + True + + >>> pd.notna(pd.NA) + False + + >>> pd.notna(np.nan) + False + + ndarrays result in an ndarray of booleans. + + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.notna(array) + array([[ True, False, True], + [ True, True, False]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... "2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.notna(index) + array([ True, True, False, True]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.notna(df) + 0 1 2 + 0 True True True + 1 True False True + + >>> pd.notna(df[1]) + 0 True + 1 False + Name: 1, dtype: bool + """ + res = isna(obj) + if isinstance(res, bool): + return not res + return ~res + + +notnull = notna + + +def array_equivalent( + left, + right, + strict_nan: bool = False, + dtype_equal: bool = False, +) -> bool: + """ + True if two arrays, left and right, have equal non-NaN elements, and NaNs + in corresponding locations. False otherwise. It is assumed that left and + right are NumPy arrays of the same dtype. The behavior of this function + (particularly with respect to NaNs) is not defined if the dtypes are + different. + + Parameters + ---------- + left, right : ndarrays + strict_nan : bool, default False + If True, consider NaN and None to be different. + dtype_equal : bool, default False + Whether `left` and `right` are known to have the same dtype + according to `is_dtype_equal`. Some methods like `BlockManager.equals`. + require that the dtypes match. Setting this to ``True`` can improve + performance, but will give different results for arrays that are + equal but different dtypes. + + Returns + ------- + b : bool + Returns True if the arrays are equivalent. + + Examples + -------- + >>> array_equivalent( + ... np.array([1, 2, np.nan]), + ... np.array([1, 2, np.nan])) + True + >>> array_equivalent( + ... np.array([1, np.nan, 2]), + ... np.array([1, 2, np.nan])) + False + """ + left, right = np.asarray(left), np.asarray(right) + + # shape compat + if left.shape != right.shape: + return False + + if dtype_equal: + # fastpath when we require that the dtypes match (Block.equals) + if left.dtype.kind in "fc": + return _array_equivalent_float(left, right) + elif left.dtype.kind in "mM": + return _array_equivalent_datetimelike(left, right) + elif is_string_or_object_np_dtype(left.dtype): + # TODO: fastpath for pandas' StringDtype + return _array_equivalent_object(left, right, strict_nan) + else: + return np.array_equal(left, right) + + # Slow path when we allow comparing different dtypes. + # Object arrays can contain None, NaN and NaT. 
+ # string dtypes must be come to this path for NumPy 1.7.1 compat + if left.dtype.kind in "OSU" or right.dtype.kind in "OSU": + # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]` + # or `in ("O", "S", "U")` + return _array_equivalent_object(left, right, strict_nan) + + # NaNs can occur in float and complex arrays. + if left.dtype.kind in "fc": + if not (left.size and right.size): + return True + return ((left == right) | (isna(left) & isna(right))).all() + + elif left.dtype.kind in "mM" or right.dtype.kind in "mM": + # datetime64, timedelta64, Period + if left.dtype != right.dtype: + return False + + left = left.view("i8") + right = right.view("i8") + + # if we have structured dtypes, compare first + if ( + left.dtype.type is np.void or right.dtype.type is np.void + ) and left.dtype != right.dtype: + return False + + return np.array_equal(left, right) + + +def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool: + return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all()) + + +def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray): + return np.array_equal(left.view("i8"), right.view("i8")) + + +def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool): + left = ensure_object(left) + right = ensure_object(right) + + mask: npt.NDArray[np.bool_] | None = None + if strict_nan: + mask = isna(left) & isna(right) + if not mask.any(): + mask = None + + try: + if mask is None: + return lib.array_equivalent_object(left, right) + if not lib.array_equivalent_object(left[~mask], right[~mask]): + return False + left_remaining = left[mask] + right_remaining = right[mask] + except ValueError: + # can raise a ValueError if left and right cannot be + # compared (e.g. nested arrays) + left_remaining = left + right_remaining = right + + for left_value, right_value in zip(left_remaining, right_remaining): + if left_value is NaT and right_value is not NaT: + return False + + elif left_value is libmissing.NA and right_value is not libmissing.NA: + return False + + elif isinstance(left_value, float) and np.isnan(left_value): + if not isinstance(right_value, float) or not np.isnan(right_value): + return False + else: + with warnings.catch_warnings(): + # suppress numpy's "elementwise comparison failed" + warnings.simplefilter("ignore", DeprecationWarning) + try: + if np.any(np.asarray(left_value != right_value)): + return False + except TypeError as err: + if "boolean value of NA is ambiguous" in str(err): + return False + raise + except ValueError: + # numpy can raise a ValueError if left and right cannot be + # compared (e.g. nested arrays) + return False + return True + + +def array_equals(left: ArrayLike, right: ArrayLike) -> bool: + """ + ExtensionArray-compatible implementation of array_equivalent. 
+ """ + if left.dtype != right.dtype: + return False + elif isinstance(left, ABCExtensionArray): + return left.equals(right) + else: + return array_equivalent(left, right, dtype_equal=True) + + +def infer_fill_value(val): + """ + infer the fill value for the nan/NaT from the provided + scalar/ndarray/list-like if we are a NaT, return the correct dtyped + element to provide proper block construction + """ + if not is_list_like(val): + val = [val] + val = np.asarray(val) + if val.dtype.kind in "mM": + return np.array("NaT", dtype=val.dtype) + elif val.dtype == object: + dtype = lib.infer_dtype(ensure_object(val), skipna=False) + if dtype in ["datetime", "datetime64"]: + return np.array("NaT", dtype=DT64NS_DTYPE) + elif dtype in ["timedelta", "timedelta64"]: + return np.array("NaT", dtype=TD64NS_DTYPE) + return np.array(np.nan, dtype=object) + elif val.dtype.kind == "U": + return np.array(np.nan, dtype=val.dtype) + return np.nan + + +def construct_1d_array_from_inferred_fill_value( + value: object, length: int +) -> ArrayLike: + # Find our empty_value dtype by constructing an array + # from our value and doing a .take on it + from pandas.core.algorithms import take_nd + from pandas.core.construction import sanitize_array + from pandas.core.indexes.base import Index + + arr = sanitize_array(value, Index(range(1)), copy=False) + taker = -1 * np.ones(length, dtype=np.intp) + return take_nd(arr, taker) + + +def maybe_fill(arr: np.ndarray) -> np.ndarray: + """ + Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype. + """ + if arr.dtype.kind not in "iub": + arr.fill(np.nan) + return arr + + +def na_value_for_dtype(dtype: DtypeObj, compat: bool = True): + """ + Return a dtype compat na value + + Parameters + ---------- + dtype : string / dtype + compat : bool, default True + + Returns + ------- + np.dtype or a pandas dtype + + Examples + -------- + >>> na_value_for_dtype(np.dtype('int64')) + 0 + >>> na_value_for_dtype(np.dtype('int64'), compat=False) + nan + >>> na_value_for_dtype(np.dtype('float64')) + nan + >>> na_value_for_dtype(np.dtype('bool')) + False + >>> na_value_for_dtype(np.dtype('datetime64[ns]')) + numpy.datetime64('NaT') + """ + + if isinstance(dtype, ExtensionDtype): + return dtype.na_value + elif dtype.kind in "mM": + unit = np.datetime_data(dtype)[0] + return dtype.type("NaT", unit) + elif dtype.kind == "f": + return np.nan + elif dtype.kind in "iu": + if compat: + return 0 + return np.nan + elif dtype.kind == "b": + if compat: + return False + return np.nan + return np.nan + + +def remove_na_arraylike(arr: Series | Index | np.ndarray): + """ + Return array-like containing only true/non-NaN values, possibly empty. + """ + if isinstance(arr.dtype, ExtensionDtype): + return arr[notna(arr)] + else: + return arr[notna(np.asarray(arr))] + + +def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: + """ + isna check that excludes incompatible dtypes + + Parameters + ---------- + obj : object + dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype + + Returns + ------- + bool + """ + if not lib.is_scalar(obj) or not isna(obj): + return False + elif dtype.kind == "M": + if isinstance(dtype, np.dtype): + # i.e. 
not tzaware + return not isinstance(obj, (np.timedelta64, Decimal)) + # we have to rule out tznaive dt64("NaT") + return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal)) + elif dtype.kind == "m": + return not isinstance(obj, (np.datetime64, Decimal)) + elif dtype.kind in "iufc": + # Numeric + return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64)) + elif dtype.kind == "b": + # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype) + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + elif dtype == _dtype_str: + # numpy string dtypes to avoid float np.nan + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float)) + + elif dtype == _dtype_object: + # This is needed for Categorical, but is kind of weird + return True + + elif isinstance(dtype, PeriodDtype): + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + elif isinstance(dtype, IntervalDtype): + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + elif isinstance(dtype, CategoricalDtype): + return is_valid_na_for_dtype(obj, dtype.categories.dtype) + + # fallback, default to allowing NaN, None, NA, NaT + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + +def isna_all(arr: ArrayLike) -> bool: + """ + Optimized equivalent to isna(arr).all() + """ + total_len = len(arr) + + # Usually it's enough to check but a small fraction of values to see if + # a block is NOT null, chunks should help in such cases. + # parameters 1000 and 40 were chosen arbitrarily + chunk_len = max(total_len // 40, 1000) + + dtype = arr.dtype + if lib.is_np_dtype(dtype, "f"): + checker = nan_checker + + elif (lib.is_np_dtype(dtype, "mM")) or isinstance( + dtype, (DatetimeTZDtype, PeriodDtype) + ): + # error: Incompatible types in assignment (expression has type + # "Callable[[Any], Any]", variable has type "ufunc") + checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment] + + else: + # error: Incompatible types in assignment (expression has type "Callable[[Any], + # Any]", variable has type "ufunc") + checker = lambda x: _isna_array( # type: ignore[assignment] + x, inf_as_na=INF_AS_NA + ) + + return all( + checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len) + ) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__init__.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8248f378e2c1acea37bdc2d41065c591360b902a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__init__.py @@ -0,0 +1,15 @@ +from pandas.core.groupby.generic import ( + DataFrameGroupBy, + NamedAgg, + SeriesGroupBy, +) +from pandas.core.groupby.groupby import GroupBy +from pandas.core.groupby.grouper import Grouper + +__all__ = [ + "DataFrameGroupBy", + "NamedAgg", + "SeriesGroupBy", + "GroupBy", + "Grouper", +] diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5815d1caad0fa1606a96c1217346611b4ca73599 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc 
b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bd039b2cf03b7f8c20c582bab250a4348b24152 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63280df50584ea280174bf50ba96bc61093f855b Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..308702be8334d0398483bb7fc3a36f74e9fc2a89 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2399e70402a31319dc66eab1f23aa7bccc99625b Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb0e01d02d0bd7c668f07224fabd4c1ab4ea568c Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98f043dc42c6a332df03d1fae1b6006a6db4ed22 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16666b9fba37ac1841e7a1ae10803e992984884d Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/base.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/base.py new file mode 100644 index 0000000000000000000000000000000000000000..a443597347283887deb9cbd3eafb5f6d3bb6d9a6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/base.py @@ -0,0 +1,121 @@ +""" +Provide basic components for groupby. 
+""" +from __future__ import annotations + +import dataclasses +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Hashable + + +@dataclasses.dataclass(order=True, frozen=True) +class OutputKey: + label: Hashable + position: int + + +# special case to prevent duplicate plots when catching exceptions when +# forwarding methods from NDFrames +plotting_methods = frozenset(["plot", "hist"]) + +# cythonized transformations or canned "agg+broadcast", which do not +# require postprocessing of the result by transform. +cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"]) + +# List of aggregation/reduction functions. +# These map each group to a single numeric value +reduction_kernels = frozenset( + [ + "all", + "any", + "corrwith", + "count", + "first", + "idxmax", + "idxmin", + "last", + "max", + "mean", + "median", + "min", + "nunique", + "prod", + # as long as `quantile`'s signature accepts only + # a single quantile value, it's a reduction. + # GH#27526 might change that. + "quantile", + "sem", + "size", + "skew", + "std", + "sum", + "var", + ] +) + +# List of transformation functions. +# a transformation is a function that, for each group, +# produces a result that has the same shape as the group. + + +transformation_kernels = frozenset( + [ + "bfill", + "cumcount", + "cummax", + "cummin", + "cumprod", + "cumsum", + "diff", + "ffill", + "fillna", + "ngroup", + "pct_change", + "rank", + "shift", + ] +) + +# these are all the public methods on Grouper which don't belong +# in either of the above lists +groupby_other_methods = frozenset( + [ + "agg", + "aggregate", + "apply", + "boxplot", + # corr and cov return ngroups*ncolumns rows, so they + # are neither a transformation nor a reduction + "corr", + "cov", + "describe", + "dtypes", + "expanding", + "ewm", + "filter", + "get_group", + "groups", + "head", + "hist", + "indices", + "ndim", + "ngroups", + "nth", + "ohlc", + "pipe", + "plot", + "resample", + "rolling", + "tail", + "take", + "transform", + "sample", + "value_counts", + ] +) +# Valid values of `name` for `groupby.transform(name)` +# NOTE: do NOT edit this directly. New additions should be inserted +# into the appropriate list above. +transform_kernel_allowlist = reduction_kernels | transformation_kernels diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/categorical.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab98cf4fe55e9b064db99e61d1245cb83b63dc1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/categorical.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import numpy as np + +from pandas.core.algorithms import unique1d +from pandas.core.arrays.categorical import ( + Categorical, + CategoricalDtype, + recode_for_categories, +) + + +def recode_for_groupby( + c: Categorical, sort: bool, observed: bool +) -> tuple[Categorical, Categorical | None]: + """ + Code the categories to ensure we can groupby for categoricals. + + If observed=True, we return a new Categorical with the observed + categories only. + + If sort=False, return a copy of self, coded with categories as + returned by .unique(), followed by any categories not appearing in + the data. If sort=True, return self. + + This method is needed solely to ensure the categorical index of the + GroupBy result has categories in the order of appearance in the data + (GH-8868). 
+ + Parameters + ---------- + c : Categorical + sort : bool + The value of the sort parameter groupby was called with. + observed : bool + Account only for the observed values + + Returns + ------- + Categorical + If sort=False, the new categories are set to the order of + appearance in codes (unless ordered=True, in which case the + original order is preserved), followed by any unrepresented + categories in the original order. + Categorical or None + If we are observed, return the original categorical, otherwise None + """ + # we only care about observed values + if observed: + # In cases with c.ordered, this is equivalent to + # return c.remove_unused_categories(), c + + unique_codes = unique1d(c.codes) + + take_codes = unique_codes[unique_codes != -1] + if sort: + take_codes = np.sort(take_codes) + + # we recode according to the uniques + categories = c.categories.take(take_codes) + codes = recode_for_categories(c.codes, c.categories, categories) + + # return a new categorical that maps our new codes + # and categories + dtype = CategoricalDtype(categories, ordered=c.ordered) + return Categorical._simple_new(codes, dtype=dtype), c + + # Already sorted according to c.categories; all is fine + if sort: + return c, None + + # sort=False should order groups in as-encountered order (GH-8868) + + # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories + all_codes = np.arange(c.categories.nunique()) + # GH 38140: exclude nan from indexer for categories + unique_notnan_codes = unique1d(c.codes[c.codes != -1]) + if sort: + unique_notnan_codes = np.sort(unique_notnan_codes) + if len(all_codes) > len(unique_notnan_codes): + # GH 13179: All categories need to be present, even if missing from the data + missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True) + take_codes = np.concatenate((unique_notnan_codes, missing_codes)) + else: + take_codes = unique_notnan_codes + + return Categorical(c, c.unique().categories.take(take_codes)), None diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/generic.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..f2e314046fb749e7d32bf7fc76ca22eb1194a328 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/generic.py @@ -0,0 +1,2852 @@ +""" +Define the SeriesGroupBy and DataFrameGroupBy +classes that hold the groupby interfaces (and some implementations). + +These are user facing as the result of the ``df.groupby(...)`` operations, +which here returns a DataFrameGroupBy object. 
+""" +from __future__ import annotations + +from collections import abc +from functools import partial +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + NamedTuple, + TypeVar, + Union, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + Interval, + lib, +) +from pandas._libs.hashtable import duplicated +from pandas.errors import SpecificationError +from pandas.util._decorators import ( + Appender, + Substitution, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_int64, + is_bool, + is_dict_like, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) +from pandas.core.dtypes.inference import is_hashable +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import algorithms +from pandas.core.apply import ( + GroupByApply, + maybe_mangle_lambdas, + reconstruct_func, + validate_func_kwargs, + warn_alias_replacement, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.groupby import ( + base, + ops, +) +from pandas.core.groupby.groupby import ( + GroupBy, + GroupByPlot, + _agg_template_frame, + _agg_template_series, + _apply_docs, + _transform_template, +) +from pandas.core.indexes.api import ( + Index, + MultiIndex, + all_indexes_same, + default_index, +) +from pandas.core.series import Series +from pandas.core.sorting import get_group_index +from pandas.core.util.numba_ import maybe_use_numba + +from pandas.plotting import boxplot_frame_groupby + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + AxisInt, + CorrelationMethod, + FillnaOptions, + IndexLabel, + Manager, + Manager2D, + SingleManager, + TakeIndexer, + ) + + from pandas import Categorical + from pandas.core.generic import NDFrame + +# TODO(typing) the return value on this callable should be any *scalar*. +AggScalar = Union[str, Callable[..., Any]] +# TODO: validate types on ScalarResult and move to _typing +# Blocked from using by https://github.com/python/mypy/issues/1484 +# See note at _mangle_lambda_list +ScalarResult = TypeVar("ScalarResult") + + +class NamedAgg(NamedTuple): + """ + Helper for column specific aggregation with control over output column names. + + Subclass of typing.NamedTuple. + + Parameters + ---------- + column : Hashable + Column label in the DataFrame to apply aggfunc. + aggfunc : function or str + Function to apply to the provided column. If string, the name of a built-in + pandas function. 
+ + Examples + -------- + >>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]}) + >>> agg_a = pd.NamedAgg(column="a", aggfunc="min") + >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x)) + >>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1) + result_a result_1 + key + 1 -1 10.5 + 2 1 12.0 + """ + + column: Hashable + aggfunc: AggScalar + + +class SeriesGroupBy(GroupBy[Series]): + def _wrap_agged_manager(self, mgr: Manager) -> Series: + out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes) + out._name = self.obj.name + return out + + def _get_data_to_aggregate( + self, *, numeric_only: bool = False, name: str | None = None + ) -> SingleManager: + ser = self._obj_with_exclusions + single = ser._mgr + if numeric_only and not is_numeric_dtype(ser.dtype): + # GH#41291 match Series behavior + kwd_name = "numeric_only" + raise TypeError( + f"Cannot use {kwd_name}=True with " + f"{type(self).__name__}.{name} and non-numeric dtypes." + ) + return single + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).min() + 1 1 + 2 3 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).agg('min') + 1 1 + 2 3 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max']) + min max + 1 1 2 + 2 3 4 + + The output column names can be controlled by passing + the desired column names and aggregations as keyword arguments. + + >>> s.groupby([1, 1, 2, 2]).agg( + ... minimum='min', + ... maximum='max', + ... ) + minimum maximum + 1 1 2 + 2 3 4 + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the aggregating function. + + >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min()) + 1 1.0 + 2 3.0 + dtype: float64 + """ + ) + + @Appender( + _apply_docs["template"].format( + input="series", examples=_apply_docs["series_examples"] + ) + ) + def apply(self, func, *args, **kwargs) -> Series: + return super().apply(func, *args, **kwargs) + + @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series") + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + relabeling = func is None + columns = None + if relabeling: + columns, func = validate_func_kwargs(kwargs) + kwargs = {} + + if isinstance(func, str): + if maybe_use_numba(engine) and engine is not None: + # Not all agg functions support numba, only propagate numba kwargs + # if user asks for numba, and engine is not None + # (if engine is None, the called function will handle the case where + # numba is requested via the global option) + kwargs["engine"] = engine + if engine_kwargs is not None: + kwargs["engine_kwargs"] = engine_kwargs + return getattr(self, func)(*args, **kwargs) + + elif isinstance(func, abc.Iterable): + # Catch instances of lists / tuples + # but not the class list / tuple itself. 
+ func = maybe_mangle_lambdas(func) + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + ret = self._aggregate_multiple_funcs(func, *args, **kwargs) + if relabeling: + # columns is not narrowed by mypy from relabeling flag + assert columns is not None # for mypy + ret.columns = columns + if not self.as_index: + ret = ret.reset_index() + return ret + + else: + cyfunc = com.get_cython_func(func) + if cyfunc and not args and not kwargs: + warn_alias_replacement(self, func, cyfunc) + return getattr(self, cyfunc)() + + if maybe_use_numba(engine): + return self._aggregate_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + + if self.ngroups == 0: + # e.g. test_evaluate_with_empty_groups without any groups to + # iterate over, we have no output on which to do dtype + # inference. We default to using the existing dtype. + # xref GH#51445 + obj = self._obj_with_exclusions + return self.obj._constructor( + [], + name=self.obj.name, + index=self._grouper.result_index, + dtype=obj.dtype, + ) + + if self._grouper.nkeys > 1: + return self._python_agg_general(func, *args, **kwargs) + + try: + return self._python_agg_general(func, *args, **kwargs) + except KeyError: + # KeyError raised in test_groupby.test_basic is bc the func does + # a dictionary lookup on group.name, but group name is not + # pinned in _python_agg_general, only in _aggregate_named + result = self._aggregate_named(func, *args, **kwargs) + + warnings.warn( + "Pinning the groupby key to each group in " + f"{type(self).__name__}.agg is deprecated, and cases that " + "relied on it will raise in a future version. " + "If your operation requires utilizing the groupby keys, " + "iterate over the groupby object instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # result is a dict whose keys are the elements of result_index + result = Series(result, index=self._grouper.result_index) + result = self._wrap_aggregated_output(result) + return result + + agg = aggregate + + def _python_agg_general(self, func, *args, **kwargs): + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[func] + warn_alias_replacement(self, orig_func, alias) + f = lambda x: func(x, *args, **kwargs) + + obj = self._obj_with_exclusions + result = self._grouper.agg_series(obj, f) + res = obj._constructor(result, name=obj.name) + return self._wrap_aggregated_output(res) + + def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame: + if isinstance(arg, dict): + if self.as_index: + # GH 15931 + raise SpecificationError("nested renamer is not supported") + else: + # GH#50684 - This accidentally worked in 1.x + msg = ( + "Passing a dictionary to SeriesGroupBy.agg is deprecated " + "and will raise in a future version of pandas. Pass a list " + "of aggregations instead." 
+ ) + warnings.warn( + message=msg, + category=FutureWarning, + stacklevel=find_stack_level(), + ) + arg = list(arg.items()) + elif any(isinstance(x, (tuple, list)) for x in arg): + arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] + else: + # list of functions / function names + columns = (com.get_callable_name(f) or f for f in arg) + arg = zip(columns, arg) + + results: dict[base.OutputKey, DataFrame | Series] = {} + with com.temp_setattr(self, "as_index", True): + # Combine results using the index, need to adjust index after + # if as_index=False (GH#50724) + for idx, (name, func) in enumerate(arg): + key = base.OutputKey(label=name, position=idx) + results[key] = self.aggregate(func, *args, **kwargs) + + if any(isinstance(x, DataFrame) for x in results.values()): + from pandas import concat + + res_df = concat( + results.values(), axis=1, keys=[key.label for key in results] + ) + return res_df + + indexed_output = {key.position: val for key, val in results.items()} + output = self.obj._constructor_expanddim(indexed_output, index=None) + output.columns = Index(key.label for key in results) + + return output + + def _wrap_applied_output( + self, + data: Series, + values: list[Any], + not_indexed_same: bool = False, + is_transform: bool = False, + ) -> DataFrame | Series: + """ + Wrap the output of SeriesGroupBy.apply into the expected result. + + Parameters + ---------- + data : Series + Input data for groupby operation. + values : List[Any] + Applied output for each group. + not_indexed_same : bool, default False + Whether the applied outputs are not indexed the same as the group axes. + + Returns + ------- + DataFrame or Series + """ + if len(values) == 0: + # GH #6265 + if is_transform: + # GH#47787 see test_group_on_empty_multiindex + res_index = data.index + else: + res_index = self._grouper.result_index + + return self.obj._constructor( + [], + name=self.obj.name, + index=res_index, + dtype=data.dtype, + ) + assert values is not None + + if isinstance(values[0], dict): + # GH #823 #24880 + index = self._grouper.result_index + res_df = self.obj._constructor_expanddim(values, index=index) + res_df = self._reindex_output(res_df) + # if self.observed is False, + # keep all-NaN rows created while re-indexing + res_ser = res_df.stack(future_stack=True) + res_ser.name = self.obj.name + return res_ser + elif isinstance(values[0], (Series, DataFrame)): + result = self._concat_objects( + values, + not_indexed_same=not_indexed_same, + is_transform=is_transform, + ) + if isinstance(result, Series): + result.name = self.obj.name + if not self.as_index and not_indexed_same: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return result + else: + # GH #6265 #24880 + result = self.obj._constructor( + data=values, index=self._grouper.result_index, name=self.obj.name + ) + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return self._reindex_output(result) + + def _aggregate_named(self, func, *args, **kwargs): + # Note: this is very similar to _aggregate_series_pure_python, + # but that does not pin group.name + result = {} + initialized = False + + for name, group in self._grouper.get_iterator( + self._obj_with_exclusions, axis=self.axis + ): + # needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations + object.__setattr__(group, "name", name) + + output = func(group, *args, **kwargs) + output = ops.extract_result(output) + if not initialized: + # We 
only do this validation on the first iteration + ops.check_result_array(output, group.dtype) + initialized = True + result[name] = output + + return result + + __examples_series_doc = dedent( + """ + >>> ser = pd.Series([390.0, 350.0, 30.0, 20.0], + ... index=["Falcon", "Falcon", "Parrot", "Parrot"], + ... name="Max Speed") + >>> grouped = ser.groupby([1, 1, 2, 2]) + >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) + Falcon 0.707107 + Falcon -0.707107 + Parrot 0.707107 + Parrot -0.707107 + Name: Max Speed, dtype: float64 + + Broadcast result of the transformation + + >>> grouped.transform(lambda x: x.max() - x.min()) + Falcon 40.0 + Falcon 40.0 + Parrot 10.0 + Parrot 10.0 + Name: Max Speed, dtype: float64 + + >>> grouped.transform("mean") + Falcon 370.0 + Falcon 370.0 + Parrot 25.0 + Parrot 25.0 + Name: Max Speed, dtype: float64 + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + for example: + + >>> grouped.transform(lambda x: x.astype(int).max()) + Falcon 390 + Falcon 390 + Parrot 30 + Parrot 30 + Name: Max Speed, dtype: int64 + """ + ) + + @Substitution(klass="Series", example=__examples_series_doc) + @Appender(_transform_template) + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + return self._transform( + func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + ) + + def _cython_transform( + self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs + ): + assert axis == 0 # handled by caller + + obj = self._obj_with_exclusions + + try: + result = self._grouper._cython_operation( + "transform", obj._values, how, axis, **kwargs + ) + except NotImplementedError as err: + # e.g. test_groupby_raises_string + raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err + + return obj._constructor(result, index=self.obj.index, name=obj.name) + + def _transform_general( + self, func: Callable, engine, engine_kwargs, *args, **kwargs + ) -> Series: + """ + Transform with a callable `func`. + """ + if maybe_use_numba(engine): + return self._transform_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + assert callable(func) + klass = type(self.obj) + + results = [] + for name, group in self._grouper.get_iterator( + self._obj_with_exclusions, axis=self.axis + ): + # this setattr is needed for test_transform_lambda_with_datetimetz + object.__setattr__(group, "name", name) + res = func(group, *args, **kwargs) + + results.append(klass(res, index=group.index)) + + # check for empty "results" to avoid concat ValueError + if results: + from pandas.core.reshape.concat import concat + + concatenated = concat(results) + result = self._set_result_index_ordered(concatenated) + else: + result = self.obj._constructor(dtype=np.float64) + + result.name = self.obj.name + return result + + def filter(self, func, dropna: bool = True, *args, **kwargs): + """ + Filter elements from groups that don't satisfy a criterion. + + Elements from groups are filtered if they do not satisfy the + boolean criterion specified by func. + + Parameters + ---------- + func : function + Criterion to apply to each group. Should return True or False. + dropna : bool + Drop groups that do not pass the filter. True by default; if False, + groups that evaluate False are filled with NaNs. + + Returns + ------- + Series + + Notes + ----- + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. 
See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : [1, 2, 3, 4, 5, 6], + ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A') + >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) + 1 2 + 3 4 + 5 6 + Name: B, dtype: int64 + """ + if isinstance(func, str): + wrapper = lambda x: getattr(x, func)(*args, **kwargs) + else: + wrapper = lambda x: func(x, *args, **kwargs) + + # Interpret np.nan as False. + def true_and_notna(x) -> bool: + b = wrapper(x) + return notna(b) and b + + try: + indices = [ + self._get_index(name) + for name, group in self._grouper.get_iterator( + self._obj_with_exclusions, axis=self.axis + ) + if true_and_notna(group) + ] + except (ValueError, TypeError) as err: + raise TypeError("the filter must return a boolean result") from err + + filtered = self._apply_filter(indices, dropna) + return filtered + + def nunique(self, dropna: bool = True) -> Series | DataFrame: + """ + Return number of unique elements in the group. + + Returns + ------- + Series + Number of unique values within each group. + + Examples + -------- + For SeriesGroupby: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).nunique() + a 2 + b 1 + dtype: int64 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 3 + dtype: int64 + >>> ser.resample('MS').nunique() + 2023-01-01 2 + 2023-02-01 1 + Freq: MS, dtype: int64 + """ + ids, _, ngroups = self._grouper.group_info + val = self.obj._values + codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False) + + if self._grouper.has_dropped_na: + mask = ids >= 0 + ids = ids[mask] + codes = codes[mask] + + group_index = get_group_index( + labels=[ids, codes], + shape=(ngroups, len(uniques)), + sort=False, + xnull=dropna, + ) + + if dropna: + mask = group_index >= 0 + if (~mask).any(): + ids = ids[mask] + group_index = group_index[mask] + + mask = duplicated(group_index, "first") + res = np.bincount(ids[~mask], minlength=ngroups) + res = ensure_int64(res) + + ri = self._grouper.result_index + result: Series | DataFrame = self.obj._constructor( + res, index=ri, name=self.obj.name + ) + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return self._reindex_output(result, fill_value=0) + + @doc(Series.describe) + def describe(self, percentiles=None, include=None, exclude=None) -> Series: + return super().describe( + percentiles=percentiles, include=include, exclude=exclude + ) + + def value_counts( + self, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + bins=None, + dropna: bool = True, + ) -> Series | DataFrame: + name = "proportion" if normalize else "count" + + if bins is None: + result = self._value_counts( + normalize=normalize, sort=sort, ascending=ascending, dropna=dropna + ) + result.name = name + return result + + from pandas.core.reshape.merge import get_join_indexers + from pandas.core.reshape.tile import cut + + ids, _, _ = self._grouper.group_info + val = self.obj._values + + index_names = self._grouper.names + [self.obj.name] + + if isinstance(val.dtype, CategoricalDtype) or ( + bins is not None and not np.iterable(bins) + ): + # scalar bins cannot 
be done at top level + # in a backward compatible way + # GH38672 relates to categorical dtype + ser = self.apply( + Series.value_counts, + normalize=normalize, + sort=sort, + ascending=ascending, + bins=bins, + ) + ser.name = name + ser.index.names = index_names + return ser + + # groupby removes null keys from groupings + mask = ids != -1 + ids, val = ids[mask], val[mask] + + lab: Index | np.ndarray + if bins is None: + lab, lev = algorithms.factorize(val, sort=True) + llab = lambda lab, inc: lab[inc] + else: + # lab is a Categorical with categories an IntervalIndex + cat_ser = cut(Series(val, copy=False), bins, include_lowest=True) + cat_obj = cast("Categorical", cat_ser._values) + lev = cat_obj.categories + lab = lev.take( + cat_obj.codes, + allow_fill=True, + fill_value=lev._na_value, + ) + llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] + + if isinstance(lab.dtype, IntervalDtype): + # TODO: should we do this inside II? + lab_interval = cast(Interval, lab) + + sorter = np.lexsort((lab_interval.left, lab_interval.right, ids)) + else: + sorter = np.lexsort((lab, ids)) + + ids, lab = ids[sorter], lab[sorter] + + # group boundaries are where group ids change + idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0] + idx = np.r_[0, idchanges] + if not len(ids): + idx = idchanges + + # new values are where sorted labels change + lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) + inc = np.r_[True, lchanges] + if not len(val): + inc = lchanges + inc[idx] = True # group boundaries are also new values + out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts + + # num. of times each group should be repeated + rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) + + # multi-index components + codes = self._grouper.reconstructed_codes + codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] + levels = [ping._group_index for ping in self._grouper.groupings] + [lev] + + if dropna: + mask = codes[-1] != -1 + if mask.all(): + dropna = False + else: + out, codes = out[mask], [level_codes[mask] for level_codes in codes] + + if normalize: + out = out.astype("float") + d = np.diff(np.r_[idx, len(ids)]) + if dropna: + m = ids[lab == -1] + np.add.at(d, m, -1) + acc = rep(d)[mask] + else: + acc = rep(d) + out /= acc + + if sort and bins is None: + cat = ids[inc][mask] if dropna else ids[inc] + sorter = np.lexsort((out if ascending else -out, cat)) + out, codes[-1] = out[sorter], codes[-1][sorter] + + if bins is not None: + # for compat. 
with libgroupby.value_counts need to ensure every + # bin is present at every index level, null filled with zeros + diff = np.zeros(len(out), dtype="bool") + for level_codes in codes[:-1]: + diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] + + ncat, nbin = diff.sum(), len(levels[-1]) + + left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] + + right = [diff.cumsum() - 1, codes[-1]] + + # error: Argument 1 to "get_join_indexers" has incompatible type + # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray, + # ndarray[Any, Any]], Index, Series]] + _, idx = get_join_indexers( + left, right, sort=False, how="left" # type: ignore[arg-type] + ) + if idx is not None: + out = np.where(idx != -1, out[idx], 0) + + if sort: + sorter = np.lexsort((out if ascending else -out, left[0])) + out, left[-1] = out[sorter], left[-1][sorter] + + # build the multi-index w/ full levels + def build_codes(lev_codes: np.ndarray) -> np.ndarray: + return np.repeat(lev_codes[diff], nbin) + + codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] + codes.append(left[-1]) + + mi = MultiIndex( + levels=levels, codes=codes, names=index_names, verify_integrity=False + ) + + if is_integer_dtype(out.dtype): + out = ensure_int64(out) + result = self.obj._constructor(out, index=mi, name=name) + if not self.as_index: + result = result.reset_index() + return result + + def fillna( + self, + value: object | ArrayLike | None = None, + method: FillnaOptions | None = None, + axis: Axis | None | lib.NoDefault = lib.no_default, + inplace: bool = False, + limit: int | None = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Series | None: + """ + Fill NA/NaN values using the specified method within groups. + + .. deprecated:: 2.2.0 + This method is deprecated and will be removed in a future version. + Use the :meth:`.SeriesGroupBy.ffill` or :meth:`.SeriesGroupBy.bfill` + for forward or backward filling instead. If you want to fill with a + single value, use :meth:`Series.fillna` instead. + + Parameters + ---------- + value : scalar, dict, Series, or DataFrame + Value to use to fill holes (e.g. 0), alternately a + dict/Series/DataFrame of values specifying which value to use for + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot + be a list. Users wanting to use the ``value`` argument and not ``method`` + should prefer :meth:`.Series.fillna` as this + will produce the same result and be more performant. + method : {{'bfill', 'ffill', None}}, default None + Method to use for filling holes. ``'ffill'`` will propagate + the last valid observation forward within a group. + ``'bfill'`` will use next valid observation to fill the gap. + axis : {0 or 'index', 1 or 'columns'} + Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`. + inplace : bool, default False + Broken. Do not set to True. + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill within a group. In other words, + if there is a gap with more than this number of consecutive NaNs, + it will only be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. 
+ downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + Returns + ------- + Series + Object with missing values filled within groups. + + See Also + -------- + ffill : Forward fill values within a group. + bfill : Backward fill values within a group. + + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse'] + >>> ser = pd.Series([1, None, None, 2, None], index=lst) + >>> ser + cat 1.0 + cat NaN + cat NaN + mouse 2.0 + mouse NaN + dtype: float64 + >>> ser.groupby(level=0).fillna(0, limit=1) + cat 1.0 + cat 0.0 + cat NaN + mouse 2.0 + mouse 0.0 + dtype: float64 + """ + warnings.warn( + f"{type(self).__name__}.fillna is deprecated and " + "will be removed in a future version. Use obj.ffill() or obj.bfill() " + "for forward or backward filling instead. If you want to fill with a " + f"single value, use {type(self.obj).__name__}.fillna instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + result = self._op_via_apply( + "fillna", + value=value, + method=method, + axis=axis, + inplace=inplace, + limit=limit, + downcast=downcast, + ) + return result + + def take( + self, + indices: TakeIndexer, + axis: Axis | lib.NoDefault = lib.no_default, + **kwargs, + ) -> Series: + """ + Return the elements in the given *positional* indices in each group. + + This means that we are not indexing according to actual values in + the index attribute of the object. We are indexing according to the + actual position of the element in the object. + + If a requested index does not exist for some group, this method will raise. + To get similar behavior that ignores indices that don't exist, see + :meth:`.SeriesGroupBy.nth`. + + Parameters + ---------- + indices : array-like + An array of ints indicating which positions to take in each group. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + The axis on which to select elements. ``0`` means that we are + selecting rows, ``1`` means that we are selecting columns. + For `SeriesGroupBy` this parameter is unused and defaults to 0. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + **kwargs + For compatibility with :meth:`numpy.take`. Has no effect on the + output. + + Returns + ------- + Series + A Series containing the elements taken from each group. + + See Also + -------- + Series.take : Take elements from a Series along an axis. + Series.loc : Select a subset of a DataFrame by labels. + Series.iloc : Select a subset of a DataFrame by positions. + numpy.take : Take elements from an array along an axis. + SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist. + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan), + ... ('rabbit', 'mammal', 15.0)], + ... columns=['name', 'class', 'max_speed'], + ... index=[4, 3, 2, 1, 0]) + >>> df + name class max_speed + 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 lion mammal 80.5 + 1 monkey mammal NaN + 0 rabbit mammal 15.0 + >>> gb = df["name"].groupby([1, 1, 2, 2, 2]) + + Take elements at positions 0 and 1 along the axis 0 in each group (default). 
+ + >>> gb.take([0, 1]) + 1 4 falcon + 3 parrot + 2 2 lion + 1 monkey + Name: name, dtype: object + + We may take elements using negative integers for positive indices, + starting from the end of the object, just like with Python lists. + + >>> gb.take([-1, -2]) + 1 3 parrot + 4 falcon + 2 0 rabbit + 1 monkey + Name: name, dtype: object + """ + result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) + return result + + def skew( + self, + axis: Axis | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ) -> Series: + """ + Return unbiased skew within groups. + + Normalized by N-1. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Axis for the function to be applied on. + This parameter is only for compatibility with DataFrame and is unused. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values when computing the result. + + numeric_only : bool, default False + Include only float, int, boolean columns. Not implemented for Series. + + **kwargs + Additional keyword arguments to be passed to the function. + + Returns + ------- + Series + + See Also + -------- + Series.skew : Return unbiased skew over requested axis. + + Examples + -------- + >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.], + ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon', + ... 'Parrot', 'Parrot', 'Parrot'], + ... name="Max Speed") + >>> ser + Falcon 390.0 + Falcon 350.0 + Falcon 357.0 + Falcon NaN + Parrot 22.0 + Parrot 20.0 + Parrot 30.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).skew() + Falcon 1.525174 + Parrot 1.457863 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).skew(skipna=False) + Falcon NaN + Parrot 1.457863 + Name: Max Speed, dtype: float64 + """ + if axis is lib.no_default: + axis = 0 + + if axis != 0: + result = self._op_via_apply( + "skew", + axis=axis, + skipna=skipna, + numeric_only=numeric_only, + **kwargs, + ) + return result + + def alt(obj): + # This should not be reached since the cython path should raise + # TypeError and not NotImplementedError. + raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") + + return self._cython_agg_general( + "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs + ) + + @property + @doc(Series.plot.__doc__) + def plot(self) -> GroupByPlot: + result = GroupByPlot(self) + return result + + @doc(Series.nlargest.__doc__) + def nlargest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + f = partial(Series.nlargest, n=n, keep=keep) + data = self._obj_with_exclusions + # Don't change behavior if result index happens to be the same, i.e. + # already ordered and n >= all group sizes. + result = self._python_apply_general(f, data, not_indexed_same=True) + return result + + @doc(Series.nsmallest.__doc__) + def nsmallest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + f = partial(Series.nsmallest, n=n, keep=keep) + data = self._obj_with_exclusions + # Don't change behavior if result index happens to be the same, i.e. + # already ordered and n >= all group sizes. 
+ result = self._python_apply_general(f, data, not_indexed_same=True) + return result + + @doc(Series.idxmin.__doc__) + def idxmin( + self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True + ) -> Series: + return self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna) + + @doc(Series.idxmax.__doc__) + def idxmax( + self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True + ) -> Series: + return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna) + + @doc(Series.corr.__doc__) + def corr( + self, + other: Series, + method: CorrelationMethod = "pearson", + min_periods: int | None = None, + ) -> Series: + result = self._op_via_apply( + "corr", other=other, method=method, min_periods=min_periods + ) + return result + + @doc(Series.cov.__doc__) + def cov( + self, other: Series, min_periods: int | None = None, ddof: int | None = 1 + ) -> Series: + result = self._op_via_apply( + "cov", other=other, min_periods=min_periods, ddof=ddof + ) + return result + + @property + def is_monotonic_increasing(self) -> Series: + """ + Return whether each group's values are monotonically increasing. + + Returns + ------- + Series + + Examples + -------- + >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot']) + >>> s.groupby(level=0).is_monotonic_increasing + Falcon False + Parrot True + dtype: bool + """ + return self.apply(lambda ser: ser.is_monotonic_increasing) + + @property + def is_monotonic_decreasing(self) -> Series: + """ + Return whether each group's values are monotonically decreasing. + + Returns + ------- + Series + + Examples + -------- + >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot']) + >>> s.groupby(level=0).is_monotonic_decreasing + Falcon True + Parrot False + dtype: bool + """ + return self.apply(lambda ser: ser.is_monotonic_decreasing) + + @doc(Series.hist.__doc__) + def hist( + self, + by=None, + ax=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + figsize: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, + ): + result = self._op_via_apply( + "hist", + by=by, + ax=ax, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + figsize=figsize, + bins=bins, + backend=backend, + legend=legend, + **kwargs, + ) + return result + + @property + @doc(Series.dtype.__doc__) + def dtype(self) -> Series: + return self.apply(lambda ser: ser.dtype) + + def unique(self) -> Series: + """ + Return unique values for each group. + + It returns unique values for each of the grouped values. Returned in + order of appearance. Hash table-based unique, therefore does NOT sort. + + Returns + ------- + Series + Unique values for each of the grouped values. + + See Also + -------- + Series.unique : Return unique values of Series object. + + Examples + -------- + >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1), + ... ('Beagle', 'dog', 15.2), + ... ('Chihuahua', 'dog', 6.9), + ... ('Persian', 'cat', 9.2), + ... ('Chihuahua', 'dog', 7), + ... ('Persian', 'cat', 8.8)], + ... 
columns=['breed', 'animal', 'height_in']) + >>> df + breed animal height_in + 0 Chihuahua dog 6.1 + 1 Beagle dog 15.2 + 2 Chihuahua dog 6.9 + 3 Persian cat 9.2 + 4 Chihuahua dog 7.0 + 5 Persian cat 8.8 + >>> ser = df.groupby('animal')['breed'].unique() + >>> ser + animal + cat [Persian] + dog [Chihuahua, Beagle] + Name: breed, dtype: object + """ + result = self._op_via_apply("unique") + return result + + +class DataFrameGroupBy(GroupBy[DataFrame]): + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> data = {"A": [1, 1, 2, 2], + ... "B": [1, 2, 3, 4], + ... "C": [0.362838, 0.227877, 1.267767, -0.562860]} + >>> df = pd.DataFrame(data) + >>> df + A B C + 0 1 1 0.362838 + 1 1 2 0.227877 + 2 2 3 1.267767 + 3 2 4 -0.562860 + + The aggregation is for each column. + + >>> df.groupby('A').agg('min') + B C + A + 1 1 0.227877 + 2 3 -0.562860 + + Multiple aggregations + + >>> df.groupby('A').agg(['min', 'max']) + B C + min max min max + A + 1 1 2 0.227877 0.362838 + 2 3 4 -0.562860 1.267767 + + Select a column for aggregation + + >>> df.groupby('A').B.agg(['min', 'max']) + min max + A + 1 1 2 + 2 3 4 + + User-defined function for aggregation + + >>> df.groupby('A').agg(lambda x: sum(x) + 2) + B C + A + 1 5 2.590715 + 2 9 2.704907 + + Different aggregations per column + + >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'}) + B C + min max sum + A + 1 1 2 0.590715 + 2 3 4 0.704907 + + To control the output names with different aggregations per column, + pandas supports "named aggregation" + + >>> df.groupby("A").agg( + ... b_min=pd.NamedAgg(column="B", aggfunc="min"), + ... c_sum=pd.NamedAgg(column="C", aggfunc="sum") + ... ) + b_min c_sum + A + 1 1 0.590715 + 2 3 0.704907 + + - The keywords are the *output* column names + - The values are tuples whose first element is the column to select + and the second element is the aggregation to apply to that column. + Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields + ``['column', 'aggfunc']`` to make it clearer what the arguments are. + As usual, the aggregation can be a callable or a string alias. + + See :ref:`groupby.aggregate.named` for more. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the aggregating function. 
+ + >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min()) + B + A + 1 1.0 + 2 3.0 + """ + ) + + @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame") + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + relabeling, func, columns, order = reconstruct_func(func, **kwargs) + func = maybe_mangle_lambdas(func) + + if maybe_use_numba(engine): + # Not all agg functions support numba, only propagate numba kwargs + # if user asks for numba + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + + op = GroupByApply(self, func, args=args, kwargs=kwargs) + result = op.agg() + if not is_dict_like(func) and result is not None: + # GH #52849 + if not self.as_index and is_list_like(func): + return result.reset_index() + else: + return result + elif relabeling: + # this should be the only (non-raising) case with relabeling + # used reordered index of columns + result = cast(DataFrame, result) + result = result.iloc[:, order] + result = cast(DataFrame, result) + # error: Incompatible types in assignment (expression has type + # "Optional[List[str]]", variable has type + # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]], + # Index, Series], Sequence[Any]]") + result.columns = columns # type: ignore[assignment] + + if result is None: + # Remove the kwargs we inserted + # (already stored in engine, engine_kwargs arguments) + if "engine" in kwargs: + del kwargs["engine"] + del kwargs["engine_kwargs"] + # at this point func is not a str, list-like, dict-like, + # or a known callable(e.g. sum) + if maybe_use_numba(engine): + return self._aggregate_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + # grouper specific aggregations + if self._grouper.nkeys > 1: + # test_groupby_as_index_series_scalar gets here with 'not self.as_index' + return self._python_agg_general(func, *args, **kwargs) + elif args or kwargs: + # test_pass_args_kwargs gets here (with and without as_index) + # can't return early + result = self._aggregate_frame(func, *args, **kwargs) + + elif self.axis == 1: + # _aggregate_multiple_funcs does not allow self.axis == 1 + # Note: axis == 1 precludes 'not self.as_index', see __init__ + result = self._aggregate_frame(func) + return result + + else: + # try to treat as if we are passing a list + gba = GroupByApply(self, [func], args=(), kwargs={}) + try: + result = gba.agg() + + except ValueError as err: + if "No objects to concatenate" not in str(err): + raise + # _aggregate_frame can fail with e.g. func=Series.mode, + # where it expects 1D values but would be getting 2D values + # In other tests, using aggregate_frame instead of GroupByApply + # would give correct values but incorrect dtypes + # object vs float64 in test_cython_agg_empty_buckets + # float64 vs int64 in test_category_order_apply + result = self._aggregate_frame(func) + + else: + # GH#32040, GH#35246 + # e.g. test_groupby_as_index_select_column_sum_empty_df + result = cast(DataFrame, result) + result.columns = self._obj_with_exclusions.columns.copy() + + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + + return result + + agg = aggregate + + def _python_agg_general(self, func, *args, **kwargs): + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[func] + warn_alias_replacement(self, orig_func, alias) + f = lambda x: func(x, *args, **kwargs) + + if self.ngroups == 0: + # e.g. 
test_evaluate_with_empty_groups different path gets different + # result dtype in empty case. + return self._python_apply_general(f, self._selected_obj, is_agg=True) + + obj = self._obj_with_exclusions + if self.axis == 1: + obj = obj.T + + if not len(obj.columns): + # e.g. test_margins_no_values_no_cols + return self._python_apply_general(f, self._selected_obj) + + output: dict[int, ArrayLike] = {} + for idx, (name, ser) in enumerate(obj.items()): + result = self._grouper.agg_series(ser, f) + output[idx] = result + + res = self.obj._constructor(output) + res.columns = obj.columns.copy(deep=False) + return self._wrap_aggregated_output(res) + + def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: + if self._grouper.nkeys != 1: + raise AssertionError("Number of keys must be 1") + + obj = self._obj_with_exclusions + + result: dict[Hashable, NDFrame | np.ndarray] = {} + for name, grp_df in self._grouper.get_iterator(obj, self.axis): + fres = func(grp_df, *args, **kwargs) + result[name] = fres + + result_index = self._grouper.result_index + other_ax = obj.axes[1 - self.axis] + out = self.obj._constructor(result, index=other_ax, columns=result_index) + if self.axis == 0: + out = out.T + + return out + + def _wrap_applied_output( + self, + data: DataFrame, + values: list, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + if len(values) == 0: + if is_transform: + # GH#47787 see test_group_on_empty_multiindex + res_index = data.index + else: + res_index = self._grouper.result_index + + result = self.obj._constructor(index=res_index, columns=data.columns) + result = result.astype(data.dtypes, copy=False) + return result + + # GH12824 + # using values[0] here breaks test_groupby_apply_none_first + first_not_none = next(com.not_none(*values), None) + + if first_not_none is None: + # GH9684 - All values are None, return an empty frame. + return self.obj._constructor() + elif isinstance(first_not_none, DataFrame): + return self._concat_objects( + values, + not_indexed_same=not_indexed_same, + is_transform=is_transform, + ) + + key_index = self._grouper.result_index if self.as_index else None + + if isinstance(first_not_none, (np.ndarray, Index)): + # GH#1738: values is list of arrays of unequal lengths + # fall through to the outer else clause + # TODO: sure this is right? 
we used to do this + # after raising AttributeError above + # GH 18930 + if not is_hashable(self._selection): + # error: Need type annotation for "name" + name = tuple(self._selection) # type: ignore[var-annotated, arg-type] + else: + # error: Incompatible types in assignment + # (expression has type "Hashable", variable + # has type "Tuple[Any, ...]") + name = self._selection # type: ignore[assignment] + return self.obj._constructor_sliced(values, index=key_index, name=name) + elif not isinstance(first_not_none, Series): + # values are not series or array-like but scalars + # self._selection not passed through to Series as the + # result should not take the name of original selection + # of columns + if self.as_index: + return self.obj._constructor_sliced(values, index=key_index) + else: + result = self.obj._constructor(values, columns=[self._selection]) + result = self._insert_inaxis_grouper(result) + return result + else: + # values are Series + return self._wrap_applied_output_series( + values, + not_indexed_same, + first_not_none, + key_index, + is_transform, + ) + + def _wrap_applied_output_series( + self, + values: list[Series], + not_indexed_same: bool, + first_not_none, + key_index: Index | None, + is_transform: bool, + ) -> DataFrame | Series: + kwargs = first_not_none._construct_axes_dict() + backup = Series(**kwargs) + values = [x if (x is not None) else backup for x in values] + + all_indexed_same = all_indexes_same(x.index for x in values) + + if not all_indexed_same: + # GH 8467 + return self._concat_objects( + values, + not_indexed_same=True, + is_transform=is_transform, + ) + + # Combine values + # vstack+constructor is faster than concat and handles MI-columns + stacked_values = np.vstack([np.asarray(v) for v in values]) + + if self.axis == 0: + index = key_index + columns = first_not_none.index.copy() + if columns.name is None: + # GH6124 - propagate name of Series when it's consistent + names = {v.name for v in values} + if len(names) == 1: + columns.name = next(iter(names)) + else: + index = first_not_none.index + columns = key_index + stacked_values = stacked_values.T + + if stacked_values.dtype == object: + # We'll have the DataFrame constructor do inference + stacked_values = stacked_values.tolist() + result = self.obj._constructor(stacked_values, index=index, columns=columns) + + if not self.as_index: + result = self._insert_inaxis_grouper(result) + + return self._reindex_output(result) + + def _cython_transform( + self, + how: str, + numeric_only: bool = False, + axis: AxisInt = 0, + **kwargs, + ) -> DataFrame: + assert axis == 0 # handled by caller + + # With self.axis == 0, we have multi-block tests + # e.g. test_rank_min_int, test_cython_transform_frame + # test_transform_numeric_ret + # With self.axis == 1, _get_data_to_aggregate does a transpose + # so we always have a single block. 
+ mgr: Manager2D = self._get_data_to_aggregate( + numeric_only=numeric_only, name=how + ) + + def arr_func(bvalues: ArrayLike) -> ArrayLike: + return self._grouper._cython_operation( + "transform", bvalues, how, 1, **kwargs + ) + + # We could use `mgr.apply` here and not have to set_axis, but + # we would have to do shape gymnastics for ArrayManager compat + res_mgr = mgr.grouped_reduce(arr_func) + res_mgr.set_axis(1, mgr.axes[1]) + + res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + res_df = self._maybe_transpose_result(res_df) + return res_df + + def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs): + if maybe_use_numba(engine): + return self._transform_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + from pandas.core.reshape.concat import concat + + applied = [] + obj = self._obj_with_exclusions + gen = self._grouper.get_iterator(obj, axis=self.axis) + fast_path, slow_path = self._define_paths(func, *args, **kwargs) + + # Determine whether to use slow or fast path by evaluating on the first group. + # Need to handle the case of an empty generator and process the result so that + # it does not need to be computed again. + try: + name, group = next(gen) + except StopIteration: + pass + else: + # 2023-02-27 No tests broken by disabling this pinning + object.__setattr__(group, "name", name) + try: + path, res = self._choose_path(fast_path, slow_path, group) + except ValueError as err: + # e.g. test_transform_with_non_scalar_group + msg = "transform must return a scalar value for each group" + raise ValueError(msg) from err + if group.size > 0: + res = _wrap_transform_general_frame(self.obj, group, res) + applied.append(res) + + # Compute and process with the remaining groups + for name, group in gen: + if group.size == 0: + continue + # 2023-02-27 No tests broken by disabling this pinning + object.__setattr__(group, "name", name) + res = path(group) + + res = _wrap_transform_general_frame(self.obj, group, res) + applied.append(res) + + concat_index = obj.columns if self.axis == 0 else obj.index + other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1 + concatenated = concat(applied, axis=self.axis, verify_integrity=False) + concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False) + return self._set_result_index_ordered(concatenated) + + __examples_dataframe_doc = dedent( + """ + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : ['one', 'one', 'two', 'three', + ... 'two', 'two'], + ... 'C' : [1, 5, 5, 2, 5, 5], + ... 'D' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A')[['C', 'D']] + >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) + C D + 0 -1.154701 -0.577350 + 1 0.577350 0.000000 + 2 0.577350 1.154701 + 3 -1.154701 -1.000000 + 4 0.577350 -0.577350 + 5 0.577350 1.000000 + + Broadcast result of the transformation + + >>> grouped.transform(lambda x: x.max() - x.min()) + C D + 0 4.0 6.0 + 1 3.0 8.0 + 2 4.0 6.0 + 3 3.0 8.0 + 4 4.0 6.0 + 5 3.0 8.0 + + >>> grouped.transform("mean") + C D + 0 3.666667 4.0 + 1 4.000000 5.0 + 2 3.666667 4.0 + 3 4.000000 5.0 + 4 3.666667 4.0 + 5 4.000000 5.0 + + .. 
versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + for example: + + >>> grouped.transform(lambda x: x.astype(int).max()) + C D + 0 5 8 + 1 5 9 + 2 5 8 + 3 5 9 + 4 5 8 + 5 5 9 + """ + ) + + @Substitution(klass="DataFrame", example=__examples_dataframe_doc) + @Appender(_transform_template) + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + return self._transform( + func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + ) + + def _define_paths(self, func, *args, **kwargs): + if isinstance(func, str): + fast_path = lambda group: getattr(group, func)(*args, **kwargs) + slow_path = lambda group: group.apply( + lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis + ) + else: + fast_path = lambda group: func(group, *args, **kwargs) + slow_path = lambda group: group.apply( + lambda x: func(x, *args, **kwargs), axis=self.axis + ) + return fast_path, slow_path + + def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame): + path = slow_path + res = slow_path(group) + + if self.ngroups == 1: + # no need to evaluate multiple paths when only + # a single group exists + return path, res + + # if we make it here, test if we can use the fast path + try: + res_fast = fast_path(group) + except AssertionError: + raise # pragma: no cover + except Exception: + # GH#29631 For user-defined function, we can't predict what may be + # raised; see test_transform.test_transform_fastpath_raises + return path, res + + # verify fast path returns either: + # a DataFrame with columns equal to group.columns + # OR a Series with index equal to group.columns + if isinstance(res_fast, DataFrame): + if not res_fast.columns.equals(group.columns): + return path, res + elif isinstance(res_fast, Series): + if not res_fast.index.equals(group.columns): + return path, res + else: + return path, res + + if res_fast.equals(res): + path = fast_path + + return path, res + + def filter(self, func, dropna: bool = True, *args, **kwargs): + """ + Filter elements from groups that don't satisfy a criterion. + + Elements from groups are filtered if they do not satisfy the + boolean criterion specified by func. + + Parameters + ---------- + func : function + Criterion to apply to each group. Should return True or False. + dropna : bool + Drop groups that do not pass the filter. True by default; if False, + groups that evaluate False are filled with NaNs. + + Returns + ------- + DataFrame + + Notes + ----- + Each subframe is endowed the attribute 'name' in case you need to know + which group you are working on. + + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : [1, 2, 3, 4, 5, 6], + ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A') + >>> grouped.filter(lambda x: x['B'].mean() > 3.) + A B C + 1 bar 2 5.0 + 3 bar 4 1.0 + 5 bar 6 9.0 + """ + indices = [] + + obj = self._selected_obj + gen = self._grouper.get_iterator(obj, axis=self.axis) + + for name, group in gen: + # 2023-02-27 no tests are broken this pinning, but it is documented in the + # docstring above. 
+ object.__setattr__(group, "name", name) + + res = func(group, *args, **kwargs) + + try: + res = res.squeeze() + except AttributeError: # allow e.g., scalars and frames to pass + pass + + # interpret the result of the filter + if is_bool(res) or (is_scalar(res) and isna(res)): + if notna(res) and res: + indices.append(self._get_index(name)) + else: + # non scalars aren't allowed + raise TypeError( + f"filter function returned a {type(res).__name__}, " + "but expected a scalar bool" + ) + + return self._apply_filter(indices, dropna) + + def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: + if self.axis == 1: + # GH 37725 + raise ValueError("Cannot subset columns when using axis=1") + # per GH 23566 + if isinstance(key, tuple) and len(key) > 1: + # if len == 1, then it becomes a SeriesGroupBy and this is actually + # valid syntax, so don't raise + raise ValueError( + "Cannot subset columns with a tuple with more than one element. " + "Use a list instead." + ) + return super().__getitem__(key) + + def _gotitem(self, key, ndim: int, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + if ndim == 2: + if subset is None: + subset = self.obj + return DataFrameGroupBy( + subset, + self.keys, + axis=self.axis, + level=self.level, + grouper=self._grouper, + exclusions=self.exclusions, + selection=key, + as_index=self.as_index, + sort=self.sort, + group_keys=self.group_keys, + observed=self.observed, + dropna=self.dropna, + ) + elif ndim == 1: + if subset is None: + subset = self.obj[key] + return SeriesGroupBy( + subset, + self.keys, + level=self.level, + grouper=self._grouper, + exclusions=self.exclusions, + selection=key, + as_index=self.as_index, + sort=self.sort, + group_keys=self.group_keys, + observed=self.observed, + dropna=self.dropna, + ) + + raise AssertionError("invalid ndim for _gotitem") + + def _get_data_to_aggregate( + self, *, numeric_only: bool = False, name: str | None = None + ) -> Manager2D: + obj = self._obj_with_exclusions + if self.axis == 1: + mgr = obj.T._mgr + else: + mgr = obj._mgr + + if numeric_only: + mgr = mgr.get_numeric_data() + return mgr + + def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: + return self.obj._constructor_from_mgr(mgr, axes=mgr.axes) + + def _apply_to_column_groupbys(self, func) -> DataFrame: + from pandas.core.reshape.concat import concat + + obj = self._obj_with_exclusions + columns = obj.columns + sgbs = [ + SeriesGroupBy( + obj.iloc[:, i], + selection=colname, + grouper=self._grouper, + exclusions=self.exclusions, + observed=self.observed, + ) + for i, colname in enumerate(obj.columns) + ] + results = [func(sgb) for sgb in sgbs] + + if not len(results): + # concat would raise + res_df = DataFrame([], columns=columns, index=self._grouper.result_index) + else: + res_df = concat(results, keys=columns, axis=1) + + if not self.as_index: + res_df.index = default_index(len(res_df)) + res_df = self._insert_inaxis_grouper(res_df) + return res_df + + def nunique(self, dropna: bool = True) -> DataFrame: + """ + Return DataFrame with counts of unique elements in each position. + + Parameters + ---------- + dropna : bool, default True + Don't include NaN in the counts. + + Returns + ------- + nunique: DataFrame + + Examples + -------- + >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', + ... 'ham', 'ham'], + ... 'value1': [1, 5, 5, 2, 5, 5], + ... 
'value2': list('abbaxy')}) + >>> df + id value1 value2 + 0 spam 1 a + 1 egg 5 b + 2 egg 5 b + 3 spam 2 a + 4 ham 5 x + 5 ham 5 y + + >>> df.groupby('id').nunique() + value1 value2 + id + egg 1 1 + ham 1 2 + spam 2 1 + + Check for rows with the same id but conflicting values: + + >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) + id value1 value2 + 0 spam 1 a + 3 spam 2 a + 4 ham 5 x + 5 ham 5 y + """ + + if self.axis != 0: + # see test_groupby_crash_on_nunique + return self._python_apply_general( + lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True + ) + + return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna)) + + def idxmax( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> DataFrame: + """ + Return index of first occurrence of maximum over requested axis. + + NA/null values are excluded. + + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + + .. versionchanged:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of maxima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmax : Return index of the maximum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmax``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the maximum value in each column. + + >>> df.idxmax() + consumption Wheat Products + co2_emissions Beef + dtype: object + + To return the index for the maximum value in each row, use ``axis="columns"``. + + >>> df.idxmax(axis="columns") + Pork co2_emissions + Wheat Products consumption + Beef co2_emissions + dtype: object + """ + return self._idxmax_idxmin( + "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna + ) + + def idxmin( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> DataFrame: + """ + Return index of first occurrence of minimum over requested axis. + + NA/null values are excluded. + + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + + .. versionchanged:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. 
versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of minima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmin : Return index of the minimum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmin``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the minimum value in each column. + + >>> df.idxmin() + consumption Pork + co2_emissions Wheat Products + dtype: object + + To return the index for the minimum value in each row, use ``axis="columns"``. + + >>> df.idxmin(axis="columns") + Pork consumption + Wheat Products co2_emissions + Beef consumption + dtype: object + """ + return self._idxmax_idxmin( + "idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna + ) + + boxplot = boxplot_frame_groupby + + def value_counts( + self, + subset: Sequence[Hashable] | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> DataFrame | Series: + """ + Return a Series or DataFrame containing counts of unique rows. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + subset : list-like, optional + Columns to use when counting unique combinations. + normalize : bool, default False + Return proportions rather than frequencies. + sort : bool, default True + Sort by frequencies. + ascending : bool, default False + Sort in ascending order. + dropna : bool, default True + Don't include counts of rows that contain NA values. + + Returns + ------- + Series or DataFrame + Series if the groupby as_index is True, otherwise DataFrame. + + See Also + -------- + Series.value_counts: Equivalent method on Series. + DataFrame.value_counts: Equivalent method on DataFrame. + SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy. + + Notes + ----- + - If the groupby as_index is True then the returned Series will have a + MultiIndex with one level per input column. + - If the groupby as_index is False then the returned DataFrame will have an + additional column with the value_counts. The column is labelled 'count' or + 'proportion', depending on the ``normalize`` parameter. + + By default, rows that contain any NA values are omitted from + the result. + + By default, the result will be in descending order so that the + first element of each group is the most frequently-occurring row. + + Examples + -------- + >>> df = pd.DataFrame({ + ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'], + ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'], + ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR'] + ... 
}) + + >>> df + gender education country + 0 male low US + 1 male medium FR + 2 female high US + 3 male low FR + 4 female high FR + 5 male low FR + + >>> df.groupby('gender').value_counts() + gender education country + female high FR 1 + US 1 + male low FR 2 + US 1 + medium FR 1 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(ascending=True) + gender education country + female high FR 1 + US 1 + male low US 1 + medium FR 1 + low FR 2 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(normalize=True) + gender education country + female high FR 0.50 + US 0.50 + male low FR 0.50 + US 0.25 + medium FR 0.25 + Name: proportion, dtype: float64 + + >>> df.groupby('gender', as_index=False).value_counts() + gender education country count + 0 female high FR 1 + 1 female high US 1 + 2 male low FR 2 + 3 male low US 1 + 4 male medium FR 1 + + >>> df.groupby('gender', as_index=False).value_counts(normalize=True) + gender education country proportion + 0 female high FR 0.50 + 1 female high US 0.50 + 2 male low FR 0.50 + 3 male low US 0.25 + 4 male medium FR 0.25 + """ + return self._value_counts(subset, normalize, sort, ascending, dropna) + + def fillna( + self, + value: Hashable | Mapping | Series | DataFrame | None = None, + method: FillnaOptions | None = None, + axis: Axis | None | lib.NoDefault = lib.no_default, + inplace: bool = False, + limit: int | None = None, + downcast=lib.no_default, + ) -> DataFrame | None: + """ + Fill NA/NaN values using the specified method within groups. + + .. deprecated:: 2.2.0 + This method is deprecated and will be removed in a future version. + Use the :meth:`.DataFrameGroupBy.ffill` or :meth:`.DataFrameGroupBy.bfill` + for forward or backward filling instead. If you want to fill with a + single value, use :meth:`DataFrame.fillna` instead. + + Parameters + ---------- + value : scalar, dict, Series, or DataFrame + Value to use to fill holes (e.g. 0), alternately a + dict/Series/DataFrame of values specifying which value to use for + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot + be a list. Users wanting to use the ``value`` argument and not ``method`` + should prefer :meth:`.DataFrame.fillna` as this + will produce the same result and be more performant. + method : {{'bfill', 'ffill', None}}, default None + Method to use for filling holes. ``'ffill'`` will propagate + the last valid observation forward within a group. + ``'bfill'`` will use next valid observation to fill the gap. + axis : {0 or 'index', 1 or 'columns'} + Axis along which to fill missing values. When the :class:`DataFrameGroupBy` + ``axis`` argument is ``0``, using ``axis=1`` here will produce + the same results as :meth:`.DataFrame.fillna`. When the + :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0`` + or ``axis=1`` here will produce the same results. + inplace : bool, default False + Broken. Do not set to True. + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill within a group. In other words, + if there is a gap with more than this number of consecutive NaNs, + it will only be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. 
+ downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + Returns + ------- + DataFrame + Object with missing values filled. + + See Also + -------- + ffill : Forward fill values within a group. + bfill : Backward fill values within a group. + + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "key": [0, 0, 1, 1, 1], + ... "A": [np.nan, 2, np.nan, 3, np.nan], + ... "B": [2, 3, np.nan, np.nan, np.nan], + ... "C": [np.nan, np.nan, 2, np.nan, np.nan], + ... } + ... ) + >>> df + key A B C + 0 0 NaN 2.0 NaN + 1 0 2.0 3.0 NaN + 2 1 NaN NaN 2.0 + 3 1 3.0 NaN NaN + 4 1 NaN NaN NaN + + Propagate non-null values forward or backward within each group along columns. + + >>> df.groupby("key").fillna(method="ffill") + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN 2.0 + + >>> df.groupby("key").fillna(method="bfill") + A B C + 0 2.0 2.0 NaN + 1 2.0 3.0 NaN + 2 3.0 NaN 2.0 + 3 3.0 NaN NaN + 4 NaN NaN NaN + + Propagate non-null values forward or backward within each group along rows. + + >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T + key A B C + 0 0.0 0.0 2.0 2.0 + 1 0.0 2.0 3.0 3.0 + 2 1.0 1.0 NaN 2.0 + 3 1.0 3.0 NaN NaN + 4 1.0 1.0 NaN NaN + + >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T + key A B C + 0 0.0 NaN 2.0 NaN + 1 0.0 2.0 3.0 NaN + 2 1.0 NaN 2.0 2.0 + 3 1.0 3.0 NaN NaN + 4 1.0 NaN NaN NaN + + Only replace the first NaN element within a group along rows. + + >>> df.groupby("key").fillna(method="ffill", limit=1) + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN NaN + """ + warnings.warn( + f"{type(self).__name__}.fillna is deprecated and " + "will be removed in a future version. Use obj.ffill() or obj.bfill() " + "for forward or backward filling instead. If you want to fill with a " + f"single value, use {type(self.obj).__name__}.fillna instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + + result = self._op_via_apply( + "fillna", + value=value, + method=method, + axis=axis, + inplace=inplace, + limit=limit, + downcast=downcast, + ) + return result + + def take( + self, + indices: TakeIndexer, + axis: Axis | None | lib.NoDefault = lib.no_default, + **kwargs, + ) -> DataFrame: + """ + Return the elements in the given *positional* indices in each group. + + This means that we are not indexing according to actual values in + the index attribute of the object. We are indexing according to the + actual position of the element in the object. + + If a requested index does not exist for some group, this method will raise. + To get similar behavior that ignores indices that don't exist, see + :meth:`.DataFrameGroupBy.nth`. + + Parameters + ---------- + indices : array-like + An array of ints indicating which positions to take. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + The axis on which to select elements. ``0`` means that we are + selecting rows, ``1`` means that we are selecting columns. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + **kwargs + For compatibility with :meth:`numpy.take`. Has no effect on the + output. + + Returns + ------- + DataFrame + An DataFrame containing the elements taken from each group. + + See Also + -------- + DataFrame.take : Take elements from a Series along an axis. 
+ DataFrame.loc : Select a subset of a DataFrame by labels. + DataFrame.iloc : Select a subset of a DataFrame by positions. + numpy.take : Take elements from an array along an axis. + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan), + ... ('rabbit', 'mammal', 15.0)], + ... columns=['name', 'class', 'max_speed'], + ... index=[4, 3, 2, 1, 0]) + >>> df + name class max_speed + 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 lion mammal 80.5 + 1 monkey mammal NaN + 0 rabbit mammal 15.0 + >>> gb = df.groupby([1, 1, 2, 2, 2]) + + Take elements at positions 0 and 1 along the axis 0 (default). + + Note how the indices selected in the result do not correspond to + our input indices 0 and 1. That's because we are selecting the 0th + and 1st rows, not rows whose indices equal 0 and 1. + + >>> gb.take([0, 1]) + name class max_speed + 1 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 2 lion mammal 80.5 + 1 monkey mammal NaN + + The order of the specified indices influences the order in the result. + Here, the order is swapped from the previous example. + + >>> gb.take([1, 0]) + name class max_speed + 1 3 parrot bird 24.0 + 4 falcon bird 389.0 + 2 1 monkey mammal NaN + 2 lion mammal 80.5 + + Take elements at indices 1 and 2 along the axis 1 (column selection). + + We may take elements using negative integers for positive indices, + starting from the end of the object, just like with Python lists. + + >>> gb.take([-1, -2]) + name class max_speed + 1 3 parrot bird 24.0 + 4 falcon bird 389.0 + 2 0 rabbit mammal 15.0 + 1 monkey mammal NaN + """ + result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) + return result + + def skew( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ) -> DataFrame: + """ + Return unbiased skew within groups. + + Normalized by N-1. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Axis for the function to be applied on. + + Specifying ``axis=None`` will apply the aggregation across both axes. + + .. versionadded:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values when computing the result. + + numeric_only : bool, default False + Include only float, int, boolean columns. + + **kwargs + Additional keyword arguments to be passed to the function. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.skew : Return unbiased skew over requested axis. + + Examples + -------- + >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi', + ... 'lion', 'monkey', 'rabbit'], + ... ['bird', 'bird', 'bird', 'bird', + ... 'mammal', 'mammal', 'mammal']] + >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class')) + >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan, + ... 80.5, 21.5, 15.0]}, + ... 
index=index) + >>> df + max_speed + name class + falcon bird 389.0 + parrot bird 24.0 + cockatoo bird 70.0 + kiwi bird NaN + lion mammal 80.5 + monkey mammal 21.5 + rabbit mammal 15.0 + >>> gb = df.groupby(["class"]) + >>> gb.skew() + max_speed + class + bird 1.628296 + mammal 1.669046 + >>> gb.skew(skipna=False) + max_speed + class + bird NaN + mammal 1.669046 + """ + if axis is lib.no_default: + axis = 0 + + if axis != 0: + result = self._op_via_apply( + "skew", + axis=axis, + skipna=skipna, + numeric_only=numeric_only, + **kwargs, + ) + return result + + def alt(obj): + # This should not be reached since the cython path should raise + # TypeError and not NotImplementedError. + raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") + + return self._cython_agg_general( + "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs + ) + + @property + @doc(DataFrame.plot.__doc__) + def plot(self) -> GroupByPlot: + result = GroupByPlot(self) + return result + + @doc(DataFrame.corr.__doc__) + def corr( + self, + method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson", + min_periods: int = 1, + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "corr", method=method, min_periods=min_periods, numeric_only=numeric_only + ) + return result + + @doc(DataFrame.cov.__doc__) + def cov( + self, + min_periods: int | None = None, + ddof: int | None = 1, + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only + ) + return result + + @doc(DataFrame.hist.__doc__) + def hist( + self, + column: IndexLabel | None = None, + by=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + ax=None, + sharex: bool = False, + sharey: bool = False, + figsize: tuple[int, int] | None = None, + layout: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, + ): + result = self._op_via_apply( + "hist", + column=column, + by=by, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + ax=ax, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + bins=bins, + backend=backend, + legend=legend, + **kwargs, + ) + return result + + @property + @doc(DataFrame.dtypes.__doc__) + def dtypes(self) -> Series: + # GH#51045 + warnings.warn( + f"{type(self).__name__}.dtypes is deprecated and will be removed in " + "a future version. 
Check the dtypes on the base object instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # error: Incompatible return value type (got "DataFrame", expected "Series") + return self._python_apply_general( # type: ignore[return-value] + lambda df: df.dtypes, self._selected_obj + ) + + @doc(DataFrame.corrwith.__doc__) + def corrwith( + self, + other: DataFrame | Series, + axis: Axis | lib.NoDefault = lib.no_default, + drop: bool = False, + method: CorrelationMethod = "pearson", + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "corrwith", + other=other, + axis=axis, + drop=drop, + method=method, + numeric_only=numeric_only, + ) + return result + + +def _wrap_transform_general_frame( + obj: DataFrame, group: DataFrame, res: DataFrame | Series +) -> DataFrame: + from pandas import concat + + if isinstance(res, Series): + # we need to broadcast across the + # other dimension; this will preserve dtypes + # GH14457 + if res.index.is_(obj.index): + res_frame = concat([res] * len(group.columns), axis=1) + res_frame.columns = group.columns + res_frame.index = group.index + else: + res_frame = obj._constructor( + np.tile(res.values, (len(group.index), 1)), + columns=group.columns, + index=group.index, + ) + assert isinstance(res_frame, DataFrame) + return res_frame + elif isinstance(res, DataFrame) and not res.index.is_(group.index): + return res._align_frame(group)[0] + else: + return res diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/groupby.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..db8949788567b76bd8df0580d19dcc3e5069e923 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/groupby.py @@ -0,0 +1,5997 @@ +""" +Provide the groupby split-apply-combine paradigm. Define the GroupBy +class providing the base-class of operations. + +The SeriesGroupBy and DataFrameGroupBy sub-class +(defined in pandas.core.groupby.generic) +expose these user-facing objects to provide specific functionality. 
+""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, +) +import datetime +from functools import ( + partial, + wraps, +) +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + TypeVar, + Union, + cast, + final, +) +import warnings + +import numpy as np + +from pandas._config.config import option_context + +from pandas._libs import ( + Timestamp, + lib, +) +from pandas._libs.algos import rank_1d +import pandas._libs.groupby as libgroupby +from pandas._libs.missing import NA +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + Axis, + AxisInt, + DtypeObj, + FillnaOptions, + IndexLabel, + NDFrameT, + PositionalIndexer, + RandomState, + Scalar, + T, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + AbstractMethodError, + DataError, +) +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + coerce_indexer_dtype, + ensure_dtype_can_hold_na, +) +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_float_dtype, + is_hashable, + is_integer, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + is_object_dtype, + is_scalar, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, + notna, +) + +from pandas.core import ( + algorithms, + sample, +) +from pandas.core._numba import executor +from pandas.core.apply import warn_alias_replacement +from pandas.core.arrays import ( + ArrowExtensionArray, + BaseMaskedArray, + Categorical, + ExtensionArray, + FloatingArray, + IntegerArray, + SparseArray, +) +from pandas.core.arrays.string_ import StringDtype +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, +) +from pandas.core.base import ( + PandasObject, + SelectionMixin, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.generic import NDFrame +from pandas.core.groupby import ( + base, + numba_, + ops, +) +from pandas.core.groupby.grouper import get_grouper +from pandas.core.groupby.indexing import ( + GroupByIndexingMixin, + GroupByNthSelector, +) +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, + RangeIndex, + default_index, +) +from pandas.core.internals.blocks import ensure_block_shape +from pandas.core.series import Series +from pandas.core.sorting import get_group_index_sorter +from pandas.core.util.numba_ import ( + get_jit_arguments, + maybe_use_numba, +) + +if TYPE_CHECKING: + from typing import Any + + from pandas.core.resample import Resampler + from pandas.core.window import ( + ExpandingGroupby, + ExponentialMovingWindowGroupby, + RollingGroupby, + ) + +_common_see_also = """ + See Also + -------- + Series.%(name)s : Apply a function %(name)s to a Series. + DataFrame.%(name)s : Apply a function %(name)s + to each row or column of a DataFrame. +""" + +_apply_docs = { + "template": """ + Apply function ``func`` group-wise and combine the results together. + + The function passed to ``apply`` must take a {input} as its first + argument and return a DataFrame, Series or scalar. ``apply`` will + then take care of combining the results back together into a single + dataframe or series. ``apply`` is therefore a highly flexible + grouping method. 
+ + While ``apply`` is a very flexible method, its downside is that + using it can be quite a bit slower than using more specific methods + like ``agg`` or ``transform``. Pandas offers a wide range of method that will + be much faster than using ``apply`` for their specific purposes, so try to + use them before reaching for ``apply``. + + Parameters + ---------- + func : callable + A callable that takes a {input} as its first argument, and + returns a dataframe, a series or a scalar. In addition the + callable may take positional and keyword arguments. + include_groups : bool, default True + When True, will attempt to apply ``func`` to the groupings in + the case that they are columns of the DataFrame. If this raises a + TypeError, the result will be computed with the groupings excluded. + When False, the groupings will be excluded when applying ``func``. + + .. versionadded:: 2.2.0 + + .. deprecated:: 2.2.0 + + Setting include_groups to True is deprecated. Only the value + False will be allowed in a future version of pandas. + + args, kwargs : tuple and dict + Optional positional and keyword arguments to pass to ``func``. + + Returns + ------- + Series or DataFrame + + See Also + -------- + pipe : Apply function to the full GroupBy object instead of to each + group. + aggregate : Apply aggregate function to the GroupBy object. + transform : Apply function column-by-column to the GroupBy object. + Series.apply : Apply a function to a Series. + DataFrame.apply : Apply a function to each row or column of a DataFrame. + + Notes + ----- + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. + + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + {examples} + """, + "dataframe_examples": """ + >>> df = pd.DataFrame({'A': 'a a b'.split(), + ... 'B': [1, 2, 3], + ... 'C': [4, 6, 5]}) + >>> g1 = df.groupby('A', group_keys=False) + >>> g2 = df.groupby('A', group_keys=True) + + Notice that ``g1`` and ``g2`` have two groups, ``a`` and ``b``, and only + differ in their ``group_keys`` argument. Calling `apply` in various ways, + we can get different grouping results: + + Example 1: below the function passed to `apply` takes a DataFrame as + its argument and returns a DataFrame. `apply` combines the result for + each group together into a new DataFrame: + + >>> g1[['B', 'C']].apply(lambda x: x / x.sum()) + B C + 0 0.333333 0.4 + 1 0.666667 0.6 + 2 1.000000 1.0 + + In the above, the groups are not part of the index. We can have them included + by using ``g2`` where ``group_keys=True``: + + >>> g2[['B', 'C']].apply(lambda x: x / x.sum()) + B C + A + a 0 0.333333 0.4 + 1 0.666667 0.6 + b 2 1.000000 1.0 + + Example 2: The function passed to `apply` takes a DataFrame as + its argument and returns a Series. `apply` combines the result for + each group together into a new DataFrame. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``. + + >>> g1[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min()) + B C + A + a 1.0 2.0 + b 0.0 0.0 + + >>> g2[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min()) + B C + A + a 1.0 2.0 + b 0.0 0.0 + + The ``group_keys`` argument has no effect here because the result is not + like-indexed (i.e. :ref:`a transform `) when compared + to the input. 
+ + Example 3: The function passed to `apply` takes a DataFrame as + its argument and returns a scalar. `apply` combines the result for + each group together into a Series, including setting the index as + appropriate: + + >>> g1.apply(lambda x: x.C.max() - x.B.min(), include_groups=False) + A + a 5 + b 2 + dtype: int64""", + "series_examples": """ + >>> s = pd.Series([0, 1, 2], index='a a b'.split()) + >>> g1 = s.groupby(s.index, group_keys=False) + >>> g2 = s.groupby(s.index, group_keys=True) + + From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``. + Notice that ``g1`` have ``g2`` have two groups, ``a`` and ``b``, and only + differ in their ``group_keys`` argument. Calling `apply` in various ways, + we can get different grouping results: + + Example 1: The function passed to `apply` takes a Series as + its argument and returns a Series. `apply` combines the result for + each group together into a new Series. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``. + + >>> g1.apply(lambda x: x * 2 if x.name == 'a' else x / 2) + a 0.0 + a 2.0 + b 1.0 + dtype: float64 + + In the above, the groups are not part of the index. We can have them included + by using ``g2`` where ``group_keys=True``: + + >>> g2.apply(lambda x: x * 2 if x.name == 'a' else x / 2) + a a 0.0 + a 2.0 + b b 1.0 + dtype: float64 + + Example 2: The function passed to `apply` takes a Series as + its argument and returns a scalar. `apply` combines the result for + each group together into a Series, including setting the index as + appropriate: + + >>> g1.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64 + + The ``group_keys`` argument has no effect here because the result is not + like-indexed (i.e. :ref:`a transform `) when compared + to the input. + + >>> g2.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64""", +} + +_groupby_agg_method_template = """ +Compute {fname} of group values. + +Parameters +---------- +numeric_only : bool, default {no} + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None``. + +min_count : int, default {mc} + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + +Returns +------- +Series or DataFrame + Computed {fname} of values within each group. + +Examples +-------- +{example} +""" + +_groupby_agg_method_engine_template = """ +Compute {fname} of group values. + +Parameters +---------- +numeric_only : bool, default {no} + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None``. + +min_count : int, default {mc} + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + +engine : str, default None {e} + * ``'cython'`` : Runs rolling apply through C-extensions from cython. + * ``'numba'`` : Runs rolling apply through JIT compiled code from numba. + Only available when ``raw`` is set to ``True``. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + +engine_kwargs : dict, default None {ek} + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. 
The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to both the ``func`` and the ``apply`` groupby aggregation. + +Returns +------- +Series or DataFrame + Computed {fname} of values within each group. + +Examples +-------- +{example} +""" + +_pipe_template = """ +Apply a ``func`` with arguments to this %(klass)s object and return its result. + +Use `.pipe` when you want to improve readability by chaining together +functions that expect Series, DataFrames, GroupBy or Resampler objects. +Instead of writing + +>>> h = lambda x, arg2, arg3: x + 1 - arg2 * arg3 +>>> g = lambda x, arg1: x * 5 / arg1 +>>> f = lambda x: x ** 4 +>>> df = pd.DataFrame([["a", 4], ["b", 5]], columns=["group", "value"]) +>>> h(g(f(df.groupby('group')), arg1=1), arg2=2, arg3=3) # doctest: +SKIP + +You can write + +>>> (df.groupby('group') +... .pipe(f) +... .pipe(g, arg1=1) +... .pipe(h, arg2=2, arg3=3)) # doctest: +SKIP + +which is much more readable. + +Parameters +---------- +func : callable or tuple of (callable, str) + Function to apply to this %(klass)s object or, alternatively, + a `(callable, data_keyword)` tuple where `data_keyword` is a + string indicating the keyword of `callable` that expects the + %(klass)s object. +args : iterable, optional + Positional arguments passed into `func`. +kwargs : dict, optional + A dictionary of keyword arguments passed into `func`. + +Returns +------- +the return type of `func`. + +See Also +-------- +Series.pipe : Apply a function with arguments to a series. +DataFrame.pipe: Apply a function with arguments to a dataframe. +apply : Apply function to each group instead of to the + full %(klass)s object. + +Notes +----- +See more `here +`_ + +Examples +-------- +%(examples)s +""" + +_transform_template = """ +Call function producing a same-indexed %(klass)s on each group. + +Returns a %(klass)s having the same indexes as the original object +filled with the transformed values. + +Parameters +---------- +f : function, str + Function to apply to each group. See the Notes section below for requirements. + + Accepted inputs are: + + - String + - Python function + - Numba JIT function with ``engine='numba'`` specified. + + Only passing a single function is supported with this engine. + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + + If a string is chosen, then it needs to be the name + of the groupby method you want to use. +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be + applied to the function + +**kwargs + Keyword arguments to be passed into func. 
+ +Returns +------- +%(klass)s + +See Also +-------- +%(klass)s.groupby.apply : Apply function ``func`` group-wise and combine + the results together. +%(klass)s.groupby.aggregate : Aggregate using one or more + operations over the specified axis. +%(klass)s.transform : Call ``func`` on self producing a %(klass)s with the + same axis shape as self. + +Notes +----- +Each group is endowed the attribute 'name' in case you need to know +which group you are working on. + +The current implementation imposes three requirements on f: + +* f must return a value that either has the same shape as the input + subframe or can be broadcast to the shape of the input subframe. + For example, if `f` returns a scalar it will be broadcast to have the + same shape as the input subframe. +* if this is a DataFrame, f must support application column-by-column + in the subframe. If f also supports application to the entire subframe, + then a fast path is used starting from the second chunk. +* f must not mutate groups. Mutation is not supported and may + produce unexpected results. See :ref:`gotchas.udf-mutation` for more details. + +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. + +.. versionchanged:: 2.0.0 + + When using ``.transform`` on a grouped DataFrame and the transformation function + returns a DataFrame, pandas now aligns the result's index + with the input's index. You can call ``.to_numpy()`` on the + result of the transformation function to avoid alignment. + +Examples +-------- +%(example)s""" + +_agg_template_series = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list, dict or None + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - None, in which case ``**kwargs`` are used with Named Aggregation. Here the + output has one column for each element in ``**kwargs``. The name of the + column is keyword, whereas the value determines the aggregation used to compute + the values in the column. + + Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + + .. deprecated:: 2.1.0 + + Passing a dictionary is deprecated and will raise in a future version + of pandas. Pass a list of aggregations instead. +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. 
+ * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + +**kwargs + * If ``func`` is None, ``**kwargs`` are used to define the output names and + aggregations via Named Aggregation. See ``func`` entry. + * Otherwise, keyword arguments to be passed into func. + +Returns +------- +{klass} + +See Also +-------- +{klass}.groupby.apply : Apply function func group-wise + and combine the results together. +{klass}.groupby.transform : Transforms the Series on each group + based on the given function. +{klass}.aggregate : Aggregate using one or more + operations over the specified axis. + +Notes +----- +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. +{examples}""" + +_agg_template_frame = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list, dict or None + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - dict of axis labels -> functions, function names or list of such. + - None, in which case ``**kwargs`` are used with Named Aggregation. Here the + output has one column for each element in ``**kwargs``. The name of the + column is keyword, whereas the value determines the aggregation used to compute + the values in the column. + + Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. 
The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + +**kwargs + * If ``func`` is None, ``**kwargs`` are used to define the output names and + aggregations via Named Aggregation. See ``func`` entry. + * Otherwise, keyword arguments to be passed into func. + +Returns +------- +{klass} + +See Also +-------- +{klass}.groupby.apply : Apply function func group-wise + and combine the results together. +{klass}.groupby.transform : Transforms the Series on each group + based on the given function. +{klass}.aggregate : Aggregate using one or more + operations over the specified axis. + +Notes +----- +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. +{examples}""" + + +@final +class GroupByPlot(PandasObject): + """ + Class implementing the .plot attribute for groupby objects. + """ + + def __init__(self, groupby: GroupBy) -> None: + self._groupby = groupby + + def __call__(self, *args, **kwargs): + def f(self): + return self.plot(*args, **kwargs) + + f.__name__ = "plot" + return self._groupby._python_apply_general(f, self._groupby._selected_obj) + + def __getattr__(self, name: str): + def attr(*args, **kwargs): + def f(self): + return getattr(self.plot, name)(*args, **kwargs) + + return self._groupby._python_apply_general(f, self._groupby._selected_obj) + + return attr + + +_KeysArgType = Union[ + Hashable, + list[Hashable], + Callable[[Hashable], Hashable], + list[Callable[[Hashable], Hashable]], + Mapping[Hashable, Hashable], +] + + +class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin): + _hidden_attrs = PandasObject._hidden_attrs | { + "as_index", + "axis", + "dropna", + "exclusions", + "grouper", + "group_keys", + "keys", + "level", + "obj", + "observed", + "sort", + } + + axis: AxisInt + _grouper: ops.BaseGrouper + keys: _KeysArgType | None = None + level: IndexLabel | None = None + group_keys: bool + + @final + def __len__(self) -> int: + return len(self.groups) + + @final + def __repr__(self) -> str: + # TODO: Better repr for GroupBy object + return object.__repr__(self) + + @final + @property + def grouper(self) -> ops.BaseGrouper: + warnings.warn( + f"{type(self).__name__}.grouper is deprecated and will be removed in a " + "future version of pandas.", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._grouper + + @final + @property + def groups(self) -> dict[Hashable, np.ndarray]: + """ + Dict {group name -> group labels}. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).groups + {'a': ['a', 'a'], 'b': ['b']} + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"]) + >>> df + a b c + 0 1 2 3 + 1 1 5 6 + 2 7 8 9 + >>> df.groupby(by=["a"]).groups + {1: [0, 1], 7: [2]} + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... 
['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').groups + {Timestamp('2023-01-01 00:00:00'): 2, Timestamp('2023-02-01 00:00:00'): 4} + """ + return self._grouper.groups + + @final + @property + def ngroups(self) -> int: + return self._grouper.ngroups + + @final + @property + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + """ + Dict {group name -> group indices}. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).indices + {'a': array([0, 1]), 'b': array([2])} + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby(by=["a"]).indices + {1: array([0, 1]), 7: array([2])} + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').indices + defaultdict(, {Timestamp('2023-01-01 00:00:00'): [0, 1], + Timestamp('2023-02-01 00:00:00'): [2, 3]}) + """ + return self._grouper.indices + + @final + def _get_indices(self, names): + """ + Safe get multiple indices, translate keys for + datelike to underlying repr. + """ + + def get_converter(s): + # possibly convert to the actual key types + # in the indices, could be a Timestamp or a np.datetime64 + if isinstance(s, datetime.datetime): + return lambda key: Timestamp(key) + elif isinstance(s, np.datetime64): + return lambda key: Timestamp(key).asm8 + else: + return lambda key: key + + if len(names) == 0: + return [] + + if len(self.indices) > 0: + index_sample = next(iter(self.indices)) + else: + index_sample = None # Dummy sample + + name_sample = names[0] + if isinstance(index_sample, tuple): + if not isinstance(name_sample, tuple): + msg = "must supply a tuple to get_group with multiple grouping keys" + raise ValueError(msg) + if not len(name_sample) == len(index_sample): + try: + # If the original grouper was a tuple + return [self.indices[name] for name in names] + except KeyError as err: + # turns out it wasn't a tuple + msg = ( + "must supply a same-length tuple to get_group " + "with multiple grouping keys" + ) + raise ValueError(msg) from err + + converters = [get_converter(s) for s in index_sample] + names = (tuple(f(n) for f, n in zip(converters, name)) for name in names) + + else: + converter = get_converter(index_sample) + names = (converter(name) for name in names) + + return [self.indices.get(name, []) for name in names] + + @final + def _get_index(self, name): + """ + Safe get index, translate keys for datelike to underlying repr. + """ + return self._get_indices([name])[0] + + @final + @cache_readonly + def _selected_obj(self): + # Note: _selected_obj is always just `self.obj` for SeriesGroupBy + if isinstance(self.obj, Series): + return self.obj + + if self._selection is not None: + if is_hashable(self._selection): + # i.e. a single key, so selecting it will return a Series. + # In this case, _obj_with_exclusions would wrap the key + # in a list and return a single-column DataFrame. 
+ return self.obj[self._selection] + + # Otherwise _selection is equivalent to _selection_list, so + # _selected_obj matches _obj_with_exclusions, so we can reuse + # that and avoid making a copy. + return self._obj_with_exclusions + + return self.obj + + @final + def _dir_additions(self) -> set[str]: + return self.obj._dir_additions() + + @Substitution( + klass="GroupBy", + examples=dedent( + """\ + >>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]}) + >>> df + A B + 0 a 1 + 1 b 2 + 2 a 3 + 3 b 4 + + To get the difference between each groups maximum and minimum value in one + pass, you can do + + >>> df.groupby('A').pipe(lambda x: x.max() - x.min()) + B + A + a 2 + b 2""" + ), + ) + @Appender(_pipe_template) + def pipe( + self, + func: Callable[..., T] | tuple[Callable[..., T], str], + *args, + **kwargs, + ) -> T: + return com.pipe(self, func, *args, **kwargs) + + @final + def get_group(self, name, obj=None) -> DataFrame | Series: + """ + Construct DataFrame from group with provided name. + + Parameters + ---------- + name : object + The name of the group to get as a DataFrame. + obj : DataFrame, default None + The DataFrame to take the DataFrame out of. If + it is None, the object groupby was called on will + be used. + + .. deprecated:: 2.1.0 + The obj is deprecated and will be removed in a future version. + Do ``df.iloc[gb.indices.get(name)]`` + instead of ``gb.get_group(name, obj=df)``. + + Returns + ------- + same type as obj + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).get_group("a") + a 1 + a 2 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby(by=["a"]).get_group((1,)) + a b c + owl 1 2 3 + toucan 1 5 6 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').get_group('2023-01-01') + 2023-01-01 1 + 2023-01-15 2 + dtype: int64 + """ + keys = self.keys + level = self.level + # mypy doesn't recognize level/keys as being sized when passed to len + if (is_list_like(level) and len(level) == 1) or ( # type: ignore[arg-type] + is_list_like(keys) and len(keys) == 1 # type: ignore[arg-type] + ): + # GH#25971 + if isinstance(name, tuple) and len(name) == 1: + # Allow users to pass tuples of length 1 to silence warning + name = name[0] + elif not isinstance(name, tuple): + warnings.warn( + "When grouping with a length-1 list-like, " + "you will need to pass a length-1 tuple to get_group in a future " + "version of pandas. Pass `(name,)` instead of `name` to silence " + "this warning.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + inds = self._get_index(name) + if not len(inds): + raise KeyError(name) + + if obj is None: + indexer = inds if self.axis == 0 else (slice(None), inds) + return self._selected_obj.iloc[indexer] + else: + warnings.warn( + "obj is deprecated and will be removed in a future version. 
" + "Do ``df.iloc[gb.indices.get(name)]`` " + "instead of ``gb.get_group(name, obj=df)``.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return obj._take_with_is_copy(inds, axis=self.axis) + + @final + def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: + """ + Groupby iterator. + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> for x, y in ser.groupby(level=0): + ... print(f'{x}\\n{y}\\n') + a + a 1 + a 2 + dtype: int64 + b + b 3 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"]) + >>> df + a b c + 0 1 2 3 + 1 1 5 6 + 2 7 8 9 + >>> for x, y in df.groupby(by=["a"]): + ... print(f'{x}\\n{y}\\n') + (1,) + a b c + 0 1 2 3 + 1 1 5 6 + (7,) + a b c + 2 7 8 9 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> for x, y in ser.resample('MS'): + ... print(f'{x}\\n{y}\\n') + 2023-01-01 00:00:00 + 2023-01-01 1 + 2023-01-15 2 + dtype: int64 + 2023-02-01 00:00:00 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + """ + keys = self.keys + level = self.level + result = self._grouper.get_iterator(self._selected_obj, axis=self.axis) + # error: Argument 1 to "len" has incompatible type "Hashable"; expected "Sized" + if is_list_like(level) and len(level) == 1: # type: ignore[arg-type] + # GH 51583 + warnings.warn( + "Creating a Groupby object with a length-1 list-like " + "level parameter will yield indexes as tuples in a future version. " + "To keep indexes as scalars, create Groupby objects with " + "a scalar level parameter instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if isinstance(keys, list) and len(keys) == 1: + # GH#42795 - when keys is a list, return tuples even when length is 1 + result = (((key,), group) for key, group in result) + return result + + +# To track operations that expand dimensions, like ohlc +OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame) + + +class GroupBy(BaseGroupBy[NDFrameT]): + """ + Class for grouping and aggregating relational data. + + See aggregate, transform, and apply functions on this object. + + It's easiest to use obj.groupby(...) to use GroupBy, but you can also do: + + :: + + grouped = groupby(obj, ...) + + Parameters + ---------- + obj : pandas object + axis : int, default 0 + level : int, default None + Level of MultiIndex + groupings : list of Grouping objects + Most users should ignore this + exclusions : array-like, optional + List of columns to exclude + name : str + Most users should ignore this + + Returns + ------- + **Attributes** + groups : dict + {group name -> group labels} + len(grouped) : int + Number of groups + + Notes + ----- + After grouping, see aggregate, apply, and transform functions. Here are + some other brief notes about usage. When grouping by multiple groups, the + result index will be a MultiIndex (hierarchical) by default. + + Iteration produces (key, group) tuples, i.e. chunking the data by group. 
So + you can write code like: + + :: + + grouped = obj.groupby(keys, axis=axis) + for key, group in grouped: + # do something with the data + + Function calls on GroupBy, if not specially implemented, "dispatch" to the + grouped data. So if you group a DataFrame and wish to invoke the std() + method on each group, you can simply do: + + :: + + df.groupby(mapper).std() + + rather than + + :: + + df.groupby(mapper).aggregate(np.std) + + You can pass arguments to these "wrapped" functions, too. + + See the online documentation for full exposition on these topics and much + more + """ + + _grouper: ops.BaseGrouper + as_index: bool + + @final + def __init__( + self, + obj: NDFrameT, + keys: _KeysArgType | None = None, + axis: Axis = 0, + level: IndexLabel | None = None, + grouper: ops.BaseGrouper | None = None, + exclusions: frozenset[Hashable] | None = None, + selection: IndexLabel | None = None, + as_index: bool = True, + sort: bool = True, + group_keys: bool = True, + observed: bool | lib.NoDefault = lib.no_default, + dropna: bool = True, + ) -> None: + self._selection = selection + + assert isinstance(obj, NDFrame), type(obj) + + self.level = level + + if not as_index: + if axis != 0: + raise ValueError("as_index=False only valid for axis=0") + + self.as_index = as_index + self.keys = keys + self.sort = sort + self.group_keys = group_keys + self.dropna = dropna + + if grouper is None: + grouper, exclusions, obj = get_grouper( + obj, + keys, + axis=axis, + level=level, + sort=sort, + observed=False if observed is lib.no_default else observed, + dropna=self.dropna, + ) + + if observed is lib.no_default: + if any(ping._passed_categorical for ping in grouper.groupings): + warnings.warn( + "The default of observed=False is deprecated and will be changed " + "to True in a future version of pandas. Pass observed=False to " + "retain current behavior or observed=True to adopt the future " + "default and silence this warning.", + FutureWarning, + stacklevel=find_stack_level(), + ) + observed = False + self.observed = observed + + self.obj = obj + self.axis = obj._get_axis_number(axis) + self._grouper = grouper + self.exclusions = frozenset(exclusions) if exclusions else frozenset() + + def __getattr__(self, attr: str): + if attr in self._internal_names_set: + return object.__getattribute__(self, attr) + if attr in self.obj: + return self[attr] + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{attr}'" + ) + + @final + def _deprecate_axis(self, axis: int, name: str) -> None: + if axis == 1: + warnings.warn( + f"{type(self).__name__}.{name} with axis=1 is deprecated and " + "will be removed in a future version. Operate on the un-grouped " + "DataFrame instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + f"The 'axis' keyword in {type(self).__name__}.{name} is deprecated " + "and will be removed in a future version. 
" + "Call without passing 'axis' instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + @final + def _op_via_apply(self, name: str, *args, **kwargs): + """Compute the result of an operation by using GroupBy's apply.""" + f = getattr(type(self._obj_with_exclusions), name) + sig = inspect.signature(f) + + if "axis" in kwargs and kwargs["axis"] is not lib.no_default: + axis = self.obj._get_axis_number(kwargs["axis"]) + self._deprecate_axis(axis, name) + elif "axis" in kwargs: + # exclude skew here because that was already defaulting to lib.no_default + # before this deprecation was instituted + if name == "skew": + pass + elif name == "fillna": + # maintain the behavior from before the deprecation + kwargs["axis"] = None + else: + kwargs["axis"] = 0 + + # a little trickery for aggregation functions that need an axis + # argument + if "axis" in sig.parameters: + if kwargs.get("axis", None) is None or kwargs.get("axis") is lib.no_default: + kwargs["axis"] = self.axis + + def curried(x): + return f(x, *args, **kwargs) + + # preserve the name so we can detect it when calling plot methods, + # to avoid duplicates + curried.__name__ = name + + # special case otherwise extra plots are created when catching the + # exception below + if name in base.plotting_methods: + return self._python_apply_general(curried, self._selected_obj) + + is_transform = name in base.transformation_kernels + result = self._python_apply_general( + curried, + self._obj_with_exclusions, + is_transform=is_transform, + not_indexed_same=not is_transform, + ) + + if self._grouper.has_dropped_na and is_transform: + # result will have dropped rows due to nans, fill with null + # and ensure index is ordered same as the input + result = self._set_result_index_ordered(result) + return result + + # ----------------------------------------------------------------- + # Dispatch/Wrapping + + @final + def _concat_objects( + self, + values, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + from pandas.core.reshape.concat import concat + + if self.group_keys and not is_transform: + if self.as_index: + # possible MI return case + group_keys = self._grouper.result_index + group_levels = self._grouper.levels + group_names = self._grouper.names + + result = concat( + values, + axis=self.axis, + keys=group_keys, + levels=group_levels, + names=group_names, + sort=False, + ) + else: + # GH5610, returns a MI, with the first level being a + # range index + keys = list(range(len(values))) + result = concat(values, axis=self.axis, keys=keys) + + elif not not_indexed_same: + result = concat(values, axis=self.axis) + + ax = self._selected_obj._get_axis(self.axis) + if self.dropna: + labels = self._grouper.group_info[0] + mask = labels != -1 + ax = ax[mask] + + # this is a very unfortunate situation + # we can't use reindex to restore the original order + # when the ax has duplicates + # so we resort to this + # GH 14776, 30667 + # TODO: can we reuse e.g. _reindex_non_unique? + if ax.has_duplicates and not result.axes[self.axis].equals(ax): + # e.g. 
test_category_order_transformer + target = algorithms.unique1d(ax._values) + indexer, _ = result.index.get_indexer_non_unique(target) + result = result.take(indexer, axis=self.axis) + else: + result = result.reindex(ax, axis=self.axis, copy=False) + + else: + result = concat(values, axis=self.axis) + + if self.obj.ndim == 1: + name = self.obj.name + elif is_hashable(self._selection): + name = self._selection + else: + name = None + + if isinstance(result, Series) and name is not None: + result.name = name + + return result + + @final + def _set_result_index_ordered( + self, result: OutputFrameOrSeries + ) -> OutputFrameOrSeries: + # set the result index on the passed values object and + # return the new object, xref 8046 + + obj_axis = self.obj._get_axis(self.axis) + + if self._grouper.is_monotonic and not self._grouper.has_dropped_na: + # shortcut if we have an already ordered grouper + result = result.set_axis(obj_axis, axis=self.axis, copy=False) + return result + + # row order is scrambled => sort the rows by position in original index + original_positions = Index(self._grouper.result_ilocs()) + result = result.set_axis(original_positions, axis=self.axis, copy=False) + result = result.sort_index(axis=self.axis) + if self._grouper.has_dropped_na: + # Add back in any missing rows due to dropna - index here is integral + # with values referring to the row of the input so can use RangeIndex + result = result.reindex(RangeIndex(len(obj_axis)), axis=self.axis) + result = result.set_axis(obj_axis, axis=self.axis, copy=False) + + return result + + @final + def _insert_inaxis_grouper(self, result: Series | DataFrame) -> DataFrame: + if isinstance(result, Series): + result = result.to_frame() + + # zip in reverse so we can always insert at loc 0 + columns = result.columns + for name, lev, in_axis in zip( + reversed(self._grouper.names), + reversed(self._grouper.get_group_levels()), + reversed([grp.in_axis for grp in self._grouper.groupings]), + ): + # GH #28549 + # When using .apply(-), name will be in columns already + if name not in columns: + if in_axis: + result.insert(0, name, lev) + else: + msg = ( + "A grouping was used that is not in the columns of the " + "DataFrame and so was excluded from the result. This grouping " + "will be included in a future version of pandas. Add the " + "grouping as a column of the DataFrame to silence this warning." + ) + warnings.warn( + message=msg, + category=FutureWarning, + stacklevel=find_stack_level(), + ) + + return result + + @final + def _maybe_transpose_result(self, result: NDFrameT) -> NDFrameT: + if self.axis == 1: + # Only relevant for DataFrameGroupBy, no-op for SeriesGroupBy + result = result.T + if result.index.equals(self.obj.index): + # Retain e.g. DatetimeIndex/TimedeltaIndex freq + # e.g. test_groupby_crash_on_nunique + result.index = self.obj.index.copy() + return result + + @final + def _wrap_aggregated_output( + self, + result: Series | DataFrame, + qs: npt.NDArray[np.float64] | None = None, + ): + """ + Wraps the output of GroupBy aggregations into the expected result. 
+ + Parameters + ---------- + result : Series, DataFrame + + Returns + ------- + Series or DataFrame + """ + # ATM we do not get here for SeriesGroupBy; when we do, we will + # need to require that result.name already match self.obj.name + + if not self.as_index: + # `not self.as_index` is only relevant for DataFrameGroupBy, + # enforced in __init__ + result = self._insert_inaxis_grouper(result) + result = result._consolidate() + index = Index(range(self._grouper.ngroups)) + + else: + index = self._grouper.result_index + + if qs is not None: + # We get here with len(qs) != 1 and not self.as_index + # in test_pass_args_kwargs + index = _insert_quantile_level(index, qs) + + result.index = index + + # error: Argument 1 to "_maybe_transpose_result" of "GroupBy" has + # incompatible type "Union[Series, DataFrame]"; expected "NDFrameT" + res = self._maybe_transpose_result(result) # type: ignore[arg-type] + return self._reindex_output(res, qs=qs) + + def _wrap_applied_output( + self, + data, + values: list, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + raise AbstractMethodError(self) + + # ----------------------------------------------------------------- + # numba + + @final + def _numba_prep(self, data: DataFrame): + ids, _, ngroups = self._grouper.group_info + sorted_index = self._grouper._sort_idx + sorted_ids = self._grouper._sorted_ids + + sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() + # GH 46867 + index_data = data.index + if isinstance(index_data, MultiIndex): + if len(self._grouper.groupings) > 1: + raise NotImplementedError( + "Grouping with more than 1 grouping labels and " + "a MultiIndex is not supported with engine='numba'" + ) + group_key = self._grouper.groupings[0].name + index_data = index_data.get_level_values(group_key) + sorted_index_data = index_data.take(sorted_index).to_numpy() + + starts, ends = lib.generate_slices(sorted_ids, ngroups) + return ( + starts, + ends, + sorted_index_data, + sorted_data, + ) + + def _numba_agg_general( + self, + func: Callable, + dtype_mapping: dict[np.dtype, Any], + engine_kwargs: dict[str, bool] | None, + **aggregator_kwargs, + ): + """ + Perform groupby with a standard numerical aggregation function (e.g. mean) + with Numba. + """ + if not self.as_index: + raise NotImplementedError( + "as_index=False is not supported. Use .reset_index() instead." + ) + if self.axis == 1: + raise NotImplementedError("axis=1 is not supported.") + + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + aggregator = executor.generate_shared_aggregator( + func, + dtype_mapping, + True, # is_grouped_kernel + **get_jit_arguments(engine_kwargs), + ) + # Pass group ids to kernel directly if it can handle it + # (This is faster since it doesn't require a sort) + ids, _, _ = self._grouper.group_info + ngroups = self._grouper.ngroups + + res_mgr = df._mgr.apply( + aggregator, labels=ids, ngroups=ngroups, **aggregator_kwargs + ) + res_mgr.axes[1] = self._grouper.result_index + result = df._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + + if data.ndim == 1: + result = result.squeeze("columns") + result.name = data.name + else: + result.columns = data.columns + return result + + @final + def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby transform routine with the numba engine. 
+ + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. + """ + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + starts, ends, sorted_index, sorted_data = self._numba_prep(df) + numba_.validate_udf(func) + numba_transform_func = numba_.generate_numba_transform_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + result = numba_transform_func( + sorted_data, + sorted_index, + starts, + ends, + len(df.columns), + *args, + ) + # result values needs to be resorted to their original positions since we + # evaluated the data sorted by group + result = result.take(np.argsort(sorted_index), axis=0) + index = data.index + if data.ndim == 1: + result_kwargs = {"name": data.name} + result = result.ravel() + else: + result_kwargs = {"columns": data.columns} + return data._constructor(result, index=index, **result_kwargs) + + @final + def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby aggregation routine with the numba engine. + + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. + """ + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + starts, ends, sorted_index, sorted_data = self._numba_prep(df) + numba_.validate_udf(func) + numba_agg_func = numba_.generate_numba_agg_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + result = numba_agg_func( + sorted_data, + sorted_index, + starts, + ends, + len(df.columns), + *args, + ) + index = self._grouper.result_index + if data.ndim == 1: + result_kwargs = {"name": data.name} + result = result.ravel() + else: + result_kwargs = {"columns": data.columns} + res = data._constructor(result, index=index, **result_kwargs) + if not self.as_index: + res = self._insert_inaxis_grouper(res) + res.index = default_index(len(res)) + return res + + # ----------------------------------------------------------------- + # apply/agg/transform + + @Appender( + _apply_docs["template"].format( + input="dataframe", examples=_apply_docs["dataframe_examples"] + ) + ) + def apply(self, func, *args, include_groups: bool = True, **kwargs) -> NDFrameT: + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[orig_func] + warn_alias_replacement(self, orig_func, alias) + + if isinstance(func, str): + if hasattr(self, func): + res = getattr(self, func) + if callable(res): + return res(*args, **kwargs) + elif args or kwargs: + raise ValueError(f"Cannot pass arguments to property {func}") + return res + + else: + raise TypeError(f"apply func should be callable, not '{func}'") + + elif args or kwargs: + if callable(func): + + @wraps(func) + def f(g): + return func(g, *args, **kwargs) + + else: + raise ValueError( + "func must be a callable if args or kwargs are supplied" + ) + else: + f = func + + if not include_groups: + return self._python_apply_general(f, self._obj_with_exclusions) + + # ignore SettingWithCopy here in case the user mutates + with option_context("mode.chained_assignment", None): + try: + result = self._python_apply_general(f, self._selected_obj) + if ( + not isinstance(self.obj, Series) + and self._selection is None + and self._selected_obj.shape != self._obj_with_exclusions.shape + 
): + warnings.warn( + message=_apply_groupings_depr.format( + type(self).__name__, "apply" + ), + category=DeprecationWarning, + stacklevel=find_stack_level(), + ) + except TypeError: + # gh-20949 + # try again, with .apply acting as a filtering + # operation, by excluding the grouping column + # This would normally not be triggered + # except if the udf is trying an operation that + # fails on *some* columns, e.g. a numeric operation + # on a string grouper column + + return self._python_apply_general(f, self._obj_with_exclusions) + + return result + + @final + def _python_apply_general( + self, + f: Callable, + data: DataFrame | Series, + not_indexed_same: bool | None = None, + is_transform: bool = False, + is_agg: bool = False, + ) -> NDFrameT: + """ + Apply function f in python space + + Parameters + ---------- + f : callable + Function to apply + data : Series or DataFrame + Data to apply f to + not_indexed_same: bool, optional + When specified, overrides the value of not_indexed_same. Apply behaves + differently when the result index is equal to the input index, but + this can be coincidental leading to value-dependent behavior. + is_transform : bool, default False + Indicator for whether the function is actually a transform + and should not have group keys prepended. + is_agg : bool, default False + Indicator for whether the function is an aggregation. When the + result is empty, we don't want to warn for this case. + See _GroupBy._python_agg_general. + + Returns + ------- + Series or DataFrame + data after applying f + """ + values, mutated = self._grouper.apply_groupwise(f, data, self.axis) + if not_indexed_same is None: + not_indexed_same = mutated + + return self._wrap_applied_output( + data, + values, + not_indexed_same, + is_transform, + ) + + @final + def _agg_general( + self, + numeric_only: bool = False, + min_count: int = -1, + *, + alias: str, + npfunc: Callable | None = None, + **kwargs, + ): + result = self._cython_agg_general( + how=alias, + alt=npfunc, + numeric_only=numeric_only, + min_count=min_count, + **kwargs, + ) + return result.__finalize__(self.obj, method="groupby") + + def _agg_py_fallback( + self, how: str, values: ArrayLike, ndim: int, alt: Callable + ) -> ArrayLike: + """ + Fallback to pure-python aggregation if _cython_operation raises + NotImplementedError. + """ + # We get here with a) EADtypes and b) object dtype + assert alt is not None + + if values.ndim == 1: + # For DataFrameGroupBy we only get here with ExtensionArray + ser = Series(values, copy=False) + else: + # We only get here with values.dtype == object + df = DataFrame(values.T, dtype=values.dtype) + # bc we split object blocks in grouped_reduce, we have only 1 col + # otherwise we'd have to worry about block-splitting GH#39329 + assert df.shape[1] == 1 + # Avoid call to self.values that can occur in DataFrame + # reductions; see GH#28949 + ser = df.iloc[:, 0] + + # We do not get here with UDFs, so we know that our dtype + # should always be preserved by the implemented aggregations + # TODO: Is this exactly right; see WrappedCythonOp get_result_dtype? 
+ try: + res_values = self._grouper.agg_series(ser, alt, preserve_dtype=True) + except Exception as err: + msg = f"agg function failed [how->{how},dtype->{ser.dtype}]" + # preserve the kind of exception that raised + raise type(err)(msg) from err + + if ser.dtype == object: + res_values = res_values.astype(object, copy=False) + + # If we are DataFrameGroupBy and went through a SeriesGroupByPath + # then we need to reshape + # GH#32223 includes case with IntegerArray values, ndarray res_values + # test_groupby_duplicate_columns with object dtype values + return ensure_block_shape(res_values, ndim=ndim) + + @final + def _cython_agg_general( + self, + how: str, + alt: Callable | None = None, + numeric_only: bool = False, + min_count: int = -1, + **kwargs, + ): + # Note: we never get here with how="ohlc" for DataFrameGroupBy; + # that goes through SeriesGroupBy + + data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how) + + def array_func(values: ArrayLike) -> ArrayLike: + try: + result = self._grouper._cython_operation( + "aggregate", + values, + how, + axis=data.ndim - 1, + min_count=min_count, + **kwargs, + ) + except NotImplementedError: + # generally if we have numeric_only=False + # and non-applicable functions + # try to python agg + # TODO: shouldn't min_count matter? + # TODO: avoid special casing SparseArray here + if how in ["any", "all"] and isinstance(values, SparseArray): + pass + elif alt is None or how in ["any", "all", "std", "sem"]: + raise # TODO: re-raise as TypeError? should not be reached + else: + return result + + assert alt is not None + result = self._agg_py_fallback(how, values, ndim=data.ndim, alt=alt) + return result + + new_mgr = data.grouped_reduce(array_func) + res = self._wrap_agged_manager(new_mgr) + if how in ["idxmin", "idxmax"]: + res = self._wrap_idxmax_idxmin(res) + out = self._wrap_aggregated_output(res) + if self.axis == 1: + out = out.infer_objects(copy=False) + return out + + def _cython_transform( + self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs + ): + raise AbstractMethodError(self) + + @final + def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + # optimized transforms + orig_func = func + func = com.get_cython_func(func) or func + if orig_func != func: + warn_alias_replacement(self, orig_func, func) + + if not isinstance(func, str): + return self._transform_general(func, engine, engine_kwargs, *args, **kwargs) + + elif func not in base.transform_kernel_allowlist: + msg = f"'{func}' is not a valid function name for transform(name)" + raise ValueError(msg) + elif func in base.cythonized_kernels or func in base.transformation_kernels: + # cythonized transform or canned "agg+broadcast" + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + return getattr(self, func)(*args, **kwargs) + + else: + # i.e. func in base.reduction_kernels + + # GH#30918 Use _transform_fast only when we know func is an aggregation + # If func is a reduction, we need to broadcast the + # result to the whole group. Compute func result + # and deal with possible broadcasting below. 
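+            # Fast path in two steps: (1) compute the reduction with
+            # as_index=True so the result is keyed by group label, then
+            # (2) _wrap_transform_fast_result broadcasts those per-group
+            # values back to the shape and order of the original rows.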
+ with com.temp_setattr(self, "as_index", True): + # GH#49834 - result needs groups in the index for + # _wrap_transform_fast_result + if func in ["idxmin", "idxmax"]: + func = cast(Literal["idxmin", "idxmax"], func) + result = self._idxmax_idxmin(func, True, *args, **kwargs) + else: + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + result = getattr(self, func)(*args, **kwargs) + + return self._wrap_transform_fast_result(result) + + @final + def _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT: + """ + Fast transform path for aggregations. + """ + obj = self._obj_with_exclusions + + # for each col, reshape to size of original frame by take operation + ids, _, _ = self._grouper.group_info + result = result.reindex(self._grouper.result_index, axis=self.axis, copy=False) + + if self.obj.ndim == 1: + # i.e. SeriesGroupBy + out = algorithms.take_nd(result._values, ids) + output = obj._constructor(out, index=obj.index, name=obj.name) + else: + # `.size()` gives Series output on DataFrame input, need axis 0 + axis = 0 if result.ndim == 1 else self.axis + # GH#46209 + # Don't convert indices: negative indices need to give rise + # to null values in the result + new_ax = result.axes[axis].take(ids) + output = result._reindex_with_indexers( + {axis: (new_ax, ids)}, allow_dups=True, copy=False + ) + output = output.set_axis(obj._get_axis(self.axis), axis=axis) + return output + + # ----------------------------------------------------------------- + # Utilities + + @final + def _apply_filter(self, indices, dropna): + if len(indices) == 0: + indices = np.array([], dtype="int64") + else: + indices = np.sort(np.concatenate(indices)) + if dropna: + filtered = self._selected_obj.take(indices, axis=self.axis) + else: + mask = np.empty(len(self._selected_obj.index), dtype=bool) + mask.fill(False) + mask[indices.astype(int)] = True + # mask fails to broadcast when passed to where; broadcast manually. + mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T + filtered = self._selected_obj.where(mask) # Fill with NaNs. + return filtered + + @final + def _cumcount_array(self, ascending: bool = True) -> np.ndarray: + """ + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from length of group - 1 to 0. 
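+
+        Returns
+        -------
+        np.ndarray
+            Cumulative count of each row within its group, in the original
+            row order.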
+ + Notes + ----- + this is currently implementing sort=False + (though the default is sort=True) for groupby in general + """ + ids, _, ngroups = self._grouper.group_info + sorter = get_group_index_sorter(ids, ngroups) + ids, count = ids[sorter], len(ids) + + if count == 0: + return np.empty(0, dtype=np.int64) + + run = np.r_[True, ids[:-1] != ids[1:]] + rep = np.diff(np.r_[np.nonzero(run)[0], count]) + out = (~run).cumsum() + + if ascending: + out -= np.repeat(out[run], rep) + else: + out = np.repeat(out[np.r_[run[1:], True]], rep) - out + + if self._grouper.has_dropped_na: + out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False)) + else: + out = out.astype(np.int64, copy=False) + + rev = np.empty(count, dtype=np.intp) + rev[sorter] = np.arange(count, dtype=np.intp) + return out[rev] + + # ----------------------------------------------------------------- + + @final + @property + def _obj_1d_constructor(self) -> Callable: + # GH28330 preserve subclassed Series/DataFrames + if isinstance(self.obj, DataFrame): + return self.obj._constructor_sliced + assert isinstance(self.obj, Series) + return self.obj._constructor + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def any(self, skipna: bool = True) -> NDFrameT: + """ + Return True if any value in the group is truthful, else False. + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing. + + Returns + ------- + Series or DataFrame + DataFrame or Series of boolean values, where a value is True if any element + is True within its respective group, False otherwise. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 0], index=lst) + >>> ser + a 1 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).any() + a True + b False + dtype: bool + + For DataFrameGroupBy: + + >>> data = [[1, 0, 3], [1, 0, 6], [7, 1, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["ostrich", "penguin", "parrot"]) + >>> df + a b c + ostrich 1 0 3 + penguin 1 0 6 + parrot 7 1 9 + >>> df.groupby(by=["a"]).any() + b c + a + 1 False True + 7 True True + """ + return self._cython_agg_general( + "any", + alt=lambda x: Series(x, copy=False).any(skipna=skipna), + skipna=skipna, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def all(self, skipna: bool = True) -> NDFrameT: + """ + Return True if all values in the group are truthful, else False. + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing. + + Returns + ------- + Series or DataFrame + DataFrame or Series of boolean values, where a value is True if all elements + are True within its respective group, False otherwise. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 0], index=lst) + >>> ser + a 1 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).all() + a True + b False + dtype: bool + + For DataFrameGroupBy: + + >>> data = [[1, 0, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["ostrich", "penguin", "parrot"]) + >>> df + a b c + ostrich 1 0 3 + penguin 1 5 6 + parrot 7 8 9 + >>> df.groupby(by=["a"]).all() + b c + a + 1 False True + 7 True True + """ + return self._cython_agg_general( + "all", + alt=lambda x: Series(x, copy=False).all(skipna=skipna), + skipna=skipna, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def count(self) -> NDFrameT: + """ + Compute count of group, excluding missing values. + + Returns + ------- + Series or DataFrame + Count of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, np.nan], index=lst) + >>> ser + a 1.0 + a 2.0 + b NaN + dtype: float64 + >>> ser.groupby(level=0).count() + a 2 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, np.nan, 3], [1, np.nan, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 NaN 3 + horse 1 NaN 6 + bull 7 8.0 9 + >>> df.groupby("a").count() + b c + a + 1 0 2 + 7 1 1 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').count() + 2023-01-01 2 + 2023-02-01 2 + Freq: MS, dtype: int64 + """ + data = self._get_data_to_aggregate() + ids, _, ngroups = self._grouper.group_info + mask = ids != -1 + + is_series = data.ndim == 1 + + def hfunc(bvalues: ArrayLike) -> ArrayLike: + # TODO(EA2D): reshape would not be necessary with 2D EAs + if bvalues.ndim == 1: + # EA + masked = mask & ~isna(bvalues).reshape(1, -1) + else: + masked = mask & ~isna(bvalues) + + counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups) + if isinstance(bvalues, BaseMaskedArray): + return IntegerArray( + counted[0], mask=np.zeros(counted.shape[1], dtype=np.bool_) + ) + elif isinstance(bvalues, ArrowExtensionArray) and not isinstance( + bvalues.dtype, StringDtype + ): + dtype = pandas_dtype("int64[pyarrow]") + return type(bvalues)._from_sequence(counted[0], dtype=dtype) + if is_series: + assert counted.ndim == 2 + assert counted.shape[0] == 1 + return counted[0] + return counted + + new_mgr = data.grouped_reduce(hfunc) + new_obj = self._wrap_agged_manager(new_mgr) + + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _wrap_aggregated_output() returns. GH 35028 + # e.g. test_dataframe_groupby_on_2_categoricals_when_observed_is_false + with com.temp_setattr(self, "observed", True): + result = self._wrap_aggregated_output(new_obj) + + return self._reindex_output(result, fill_value=0) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + """ + Compute mean of groups, excluding missing values. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None`` and defaults to ``False``. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. 
+ * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + Returns + ------- + pandas.Series or pandas.DataFrame + %(see_also)s + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], + ... 'B': [np.nan, 2, 3, 4, 5], + ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C']) + + Groupby one column and return the mean of the remaining columns in + each group. + + >>> df.groupby('A').mean() + B C + A + 1 3.0 1.333333 + 2 4.0 1.500000 + + Groupby two columns and return the mean of the remaining column. + + >>> df.groupby(['A', 'B']).mean() + C + A B + 1 2.0 2.0 + 4.0 1.0 + 2 3.0 1.0 + 5.0 2.0 + + Groupby one column and return the mean of only particular column in + the group. + + >>> df.groupby('A')['B'].mean() + A + 1 3.0 + 2 4.0 + Name: B, dtype: float64 + """ + + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_mean + + return self._numba_agg_general( + grouped_mean, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ) + else: + result = self._cython_agg_general( + "mean", + alt=lambda x: Series(x, copy=False).mean(numeric_only=numeric_only), + numeric_only=numeric_only, + ) + return result.__finalize__(self.obj, method="groupby") + + @final + def median(self, numeric_only: bool = False) -> NDFrameT: + """ + Compute median of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None`` and defaults to False. + + Returns + ------- + Series or DataFrame + Median of values within each group. + + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).median() + a 7.0 + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).median() + a b + dog 3.0 4.0 + mouse 7.0 3.0 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 3, 4, 5], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... 
'2023-02-15'])) + >>> ser.resample('MS').median() + 2023-01-01 2.0 + 2023-02-01 4.0 + Freq: MS, dtype: float64 + """ + result = self._cython_agg_general( + "median", + alt=lambda x: Series(x, copy=False).median(numeric_only=numeric_only), + numeric_only=numeric_only, + ) + return result.__finalize__(self.obj, method="groupby") + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def std( + self, + ddof: int = 1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + numeric_only: bool = False, + ): + """ + Compute standard deviation of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Standard deviation of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).std() + a 3.21455 + b 0.57735 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).std() + a b + dog 2.000000 3.511885 + mouse 2.217356 1.500000 + """ + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_var + + return np.sqrt( + self._numba_agg_general( + grouped_var, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ddof=ddof, + ) + ) + else: + return self._cython_agg_general( + "std", + alt=lambda x: Series(x, copy=False).std(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def var( + self, + ddof: int = 1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + numeric_only: bool = False, + ): + """ + Compute variance of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. 
+ * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Variance of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).var() + a 10.333333 + b 0.333333 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).var() + a b + dog 4.000000 12.333333 + mouse 4.916667 2.250000 + """ + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_var + + return self._numba_agg_general( + grouped_var, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ddof=ddof, + ) + else: + return self._cython_agg_general( + "var", + alt=lambda x: Series(x, copy=False).var(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + def _value_counts( + self, + subset: Sequence[Hashable] | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> DataFrame | Series: + """ + Shared implementation of value_counts for SeriesGroupBy and DataFrameGroupBy. + + SeriesGroupBy additionally supports a bins argument. See the docstring of + DataFrameGroupBy.value_counts for a description of arguments. + """ + if self.axis == 1: + raise NotImplementedError( + "DataFrameGroupBy.value_counts only handles axis=0" + ) + name = "proportion" if normalize else "count" + + df = self.obj + obj = self._obj_with_exclusions + + in_axis_names = { + grouping.name for grouping in self._grouper.groupings if grouping.in_axis + } + if isinstance(obj, Series): + _name = obj.name + keys = [] if _name in in_axis_names else [obj] + else: + unique_cols = set(obj.columns) + if subset is not None: + subsetted = set(subset) + clashing = subsetted & set(in_axis_names) + if clashing: + raise ValueError( + f"Keys {clashing} in subset cannot be in " + "the groupby column keys." + ) + doesnt_exist = subsetted - unique_cols + if doesnt_exist: + raise ValueError( + f"Keys {doesnt_exist} in subset do not " + f"exist in the DataFrame." 
+ ) + else: + subsetted = unique_cols + + keys = [ + # Can't use .values because the column label needs to be preserved + obj.iloc[:, idx] + for idx, _name in enumerate(obj.columns) + if _name not in in_axis_names and _name in subsetted + ] + + groupings = list(self._grouper.groupings) + for key in keys: + grouper, _, _ = get_grouper( + df, + key=key, + axis=self.axis, + sort=self.sort, + observed=False, + dropna=dropna, + ) + groupings += list(grouper.groupings) + + # Take the size of the overall columns + gb = df.groupby( + groupings, + sort=self.sort, + observed=self.observed, + dropna=self.dropna, + ) + result_series = cast(Series, gb.size()) + result_series.name = name + + # GH-46357 Include non-observed categories + # of non-grouping columns regardless of `observed` + if any( + isinstance(grouping.grouping_vector, (Categorical, CategoricalIndex)) + and not grouping._observed + for grouping in groupings + ): + levels_list = [ping._result_index for ping in groupings] + multi_index = MultiIndex.from_product( + levels_list, names=[ping.name for ping in groupings] + ) + result_series = result_series.reindex(multi_index, fill_value=0) + + if sort: + # Sort by the values + result_series = result_series.sort_values( + ascending=ascending, kind="stable" + ) + if self.sort: + # Sort by the groupings + names = result_series.index.names + # GH#55951 - Temporarily replace names in case they are integers + result_series.index.names = range(len(names)) + index_level = list(range(len(self._grouper.groupings))) + result_series = result_series.sort_index( + level=index_level, sort_remaining=False + ) + result_series.index.names = names + + if normalize: + # Normalize the results by dividing by the original group sizes. + # We are guaranteed to have the first N levels be the + # user-requested grouping. + levels = list( + range(len(self._grouper.groupings), result_series.index.nlevels) + ) + indexed_group_size = result_series.groupby( + result_series.index.droplevel(levels), + sort=self.sort, + dropna=self.dropna, + # GH#43999 - deprecation of observed=False + observed=False, + ).transform("sum") + result_series /= indexed_group_size + + # Handle groups of non-observed categories + result_series = result_series.fillna(0.0) + + result: Series | DataFrame + if self.as_index: + result = result_series + else: + # Convert to frame + index = result_series.index + columns = com.fill_missing_names(index.names) + if name in columns: + raise ValueError(f"Column label '{name}' is duplicate of result column") + result_series.name = name + result_series.index = index.set_names(range(len(columns))) + result_frame = result_series.reset_index() + orig_dtype = self._grouper.groupings[0].obj.columns.dtype # type: ignore[union-attr] + cols = Index(columns, dtype=orig_dtype).insert(len(columns), name) + result_frame.columns = cols + result = result_frame + return result.__finalize__(self.obj, method="value_counts") + + @final + def sem(self, ddof: int = 1, numeric_only: bool = False) -> NDFrameT: + """ + Compute standard error of the mean of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Standard error of the mean of values within each group. 
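+
+        See Also
+        --------
+        Series.sem : Equivalent method on Series.
+        DataFrame.sem : Equivalent method on DataFrame.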
+ + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([5, 10, 8, 14], index=lst) + >>> ser + a 5 + a 10 + b 8 + b 14 + dtype: int64 + >>> ser.groupby(level=0).sem() + a 2.5 + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 12, 11], [1, 15, 2], [2, 5, 8], [2, 6, 12]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 12 11 + salmon 1 15 2 + catfish 2 5 8 + goldfish 2 6 12 + >>> df.groupby("a").sem() + b c + a + 1 1.5 4.5 + 2 0.5 2.0 + + For Resampler: + + >>> ser = pd.Series([1, 3, 2, 4, 3, 8], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').sem() + 2023-01-01 0.577350 + 2023-02-01 1.527525 + Freq: MS, dtype: float64 + """ + if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype): + raise TypeError( + f"{type(self).__name__}.sem called with " + f"numeric_only={numeric_only} and dtype {self.obj.dtype}" + ) + return self._cython_agg_general( + "sem", + alt=lambda x: Series(x, copy=False).sem(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def size(self) -> DataFrame | Series: + """ + Compute group sizes. + + Returns + ------- + DataFrame or Series + Number of rows in each group as a Series if as_index is True + or a DataFrame if as_index is False. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).size() + a 2 + b 1 + dtype: int64 + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby("a").size() + a + 1 2 + 7 1 + dtype: int64 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + dtype: int64 + >>> ser.resample('MS').size() + 2023-01-01 2 + 2023-02-01 1 + Freq: MS, dtype: int64 + """ + result = self._grouper.size() + dtype_backend: None | Literal["pyarrow", "numpy_nullable"] = None + if isinstance(self.obj, Series): + if isinstance(self.obj.array, ArrowExtensionArray): + if isinstance(self.obj.array, ArrowStringArrayNumpySemantics): + dtype_backend = None + elif isinstance(self.obj.array, ArrowStringArray): + dtype_backend = "numpy_nullable" + else: + dtype_backend = "pyarrow" + elif isinstance(self.obj.array, BaseMaskedArray): + dtype_backend = "numpy_nullable" + # TODO: For DataFrames what if columns are mixed arrow/numpy/masked? + + # GH28330 preserve subclassed Series/DataFrames through calls + if isinstance(self.obj, Series): + result = self._obj_1d_constructor(result, name=self.obj.name) + else: + result = self._obj_1d_constructor(result) + + if dtype_backend is not None: + result = result.convert_dtypes( + infer_objects=False, + convert_string=False, + convert_boolean=False, + convert_floating=False, + dtype_backend=dtype_backend, + ) + + with com.temp_setattr(self, "as_index", True): + # size already has the desired behavior in GH#49519, but this makes the + # as_index=False path of _reindex_output fail on categorical groupers. 
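+            # fill_value=0 means unobserved categorical groups report a size
+            # of 0 rather than NaN.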
+ result = self._reindex_output(result, fill_value=0) + if not self.as_index: + # error: Incompatible types in assignment (expression has + # type "DataFrame", variable has type "Series") + result = result.rename("size").reset_index() # type: ignore[assignment] + return result + + @final + @doc( + _groupby_agg_method_engine_template, + fname="sum", + no=False, + mc=0, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).sum() + a 3 + b 7 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").sum() + b c + a + 1 10 7 + 2 11 17""" + ), + ) + def sum( + self, + numeric_only: bool = False, + min_count: int = 0, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_sum + + return self._numba_agg_general( + grouped_sum, + executor.default_dtype_mapping, + engine_kwargs, + min_periods=min_count, + ) + else: + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _agg_general() returns. GH #31422 + with com.temp_setattr(self, "observed", True): + result = self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="sum", + npfunc=np.sum, + ) + + return self._reindex_output(result, fill_value=0) + + @final + @doc( + _groupby_agg_method_template, + fname="prod", + no=False, + mc=0, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).prod() + a 2 + b 12 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").prod() + b c + a + 1 16 10 + 2 30 72""" + ), + ) + def prod(self, numeric_only: bool = False, min_count: int = 0) -> NDFrameT: + return self._agg_general( + numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod + ) + + @final + @doc( + _groupby_agg_method_engine_template, + fname="min", + no=False, + mc=-1, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).min() + a 1 + b 3 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").min() + b c + a + 1 2 2 + 2 5 8""" + ), + ) + def min( + self, + numeric_only: bool = False, + min_count: int = -1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_min_max + + return self._numba_agg_general( + grouped_min_max, + executor.identity_dtype_mapping, + engine_kwargs, + min_periods=min_count, + is_max=False, + ) + else: + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="min", + npfunc=np.min, + ) + + @final + @doc( + _groupby_agg_method_engine_template, + fname="max", + no=False, + mc=-1, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).max() + a 2 + b 4 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").max() + b c + a + 1 8 5 + 2 6 9""" + ), + ) + def max( + self, + numeric_only: bool = False, + min_count: int = -1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_min_max + + return self._numba_agg_general( + grouped_min_max, + executor.identity_dtype_mapping, + engine_kwargs, + min_periods=min_count, + is_max=True, + ) + else: + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="max", + npfunc=np.max, + ) + + @final + def first( + self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True + ) -> NDFrameT: + """ + Compute the first entry of each column within each group. + + Defaults to skipping NA elements. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + min_count : int, default -1 + The required number of valid values to perform the operation. If fewer + than ``min_count`` valid values are present the result will be NA. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + + .. versionadded:: 2.2.1 + + Returns + ------- + Series or DataFrame + First values within each group. + + See Also + -------- + DataFrame.groupby : Apply a function groupby to each row or column of a + DataFrame. + pandas.core.groupby.DataFrameGroupBy.last : Compute the last non-null entry + of each column. + pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. + + Examples + -------- + >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[None, 5, 6], C=[1, 2, 3], + ... 
D=['3/11/2000', '3/12/2000', '3/13/2000'])) + >>> df['D'] = pd.to_datetime(df['D']) + >>> df.groupby("A").first() + B C D + A + 1 5.0 1 2000-03-11 + 3 6.0 3 2000-03-13 + >>> df.groupby("A").first(min_count=2) + B C D + A + 1 NaN 1.0 2000-03-11 + 3 NaN NaN NaT + >>> df.groupby("A").first(numeric_only=True) + B C + A + 1 5.0 1 + 3 6.0 3 + """ + + def first_compat(obj: NDFrameT, axis: AxisInt = 0): + def first(x: Series): + """Helper function for first item that isn't NA.""" + arr = x.array[notna(x.array)] + if not len(arr): + return x.array.dtype.na_value + return arr[0] + + if isinstance(obj, DataFrame): + return obj.apply(first, axis=axis) + elif isinstance(obj, Series): + return first(obj) + else: # pragma: no cover + raise TypeError(type(obj)) + + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="first", + npfunc=first_compat, + skipna=skipna, + ) + + @final + def last( + self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True + ) -> NDFrameT: + """ + Compute the last entry of each column within each group. + + Defaults to skipping NA elements. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. If None, will attempt to use + everything, then use only numeric data. + min_count : int, default -1 + The required number of valid values to perform the operation. If fewer + than ``min_count`` valid values are present the result will be NA. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + + .. versionadded:: 2.2.1 + + Returns + ------- + Series or DataFrame + Last of values within each group. + + See Also + -------- + DataFrame.groupby : Apply a function groupby to each row or column of a + DataFrame. + pandas.core.groupby.DataFrameGroupBy.first : Compute the first non-null entry + of each column. + pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. + + Examples + -------- + >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3])) + >>> df.groupby("A").last() + B C + A + 1 5.0 2 + 3 6.0 3 + """ + + def last_compat(obj: NDFrameT, axis: AxisInt = 0): + def last(x: Series): + """Helper function for last item that isn't NA.""" + arr = x.array[notna(x.array)] + if not len(arr): + return x.array.dtype.na_value + return arr[-1] + + if isinstance(obj, DataFrame): + return obj.apply(last, axis=axis) + elif isinstance(obj, Series): + return last(obj) + else: # pragma: no cover + raise TypeError(type(obj)) + + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="last", + npfunc=last_compat, + skipna=skipna, + ) + + @final + def ohlc(self) -> DataFrame: + """ + Compute open, high, low and close values of a group, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex + + Returns + ------- + DataFrame + Open, high, low and close values within each group. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC',] + >>> ser = pd.Series([3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 0.1, 0.5], index=lst) + >>> ser + SPX 3.4 + CAC 9.0 + SPX 7.2 + CAC 5.2 + SPX 8.8 + CAC 9.4 + SPX 0.1 + CAC 0.5 + dtype: float64 + >>> ser.groupby(level=0).ohlc() + open high low close + CAC 9.0 9.4 0.5 0.5 + SPX 3.4 8.8 0.1 0.1 + + For DataFrameGroupBy: + + >>> data = {2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2 , 1], + ... 
2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0]}
+        >>> df = pd.DataFrame(data, index=['SPX', 'CAC', 'SPX', 'CAC',
+        ...                                'SPX', 'CAC', 'SPX', 'CAC'])
+        >>> df
+             2022  2023
+        SPX   1.2   3.4
+        CAC   2.3   9.0
+        SPX   8.9   7.2
+        CAC   4.5   5.2
+        SPX   4.4   8.8
+        CAC   3.0   9.4
+        SPX   2.0   8.2
+        CAC   1.0   1.0
+        >>> df.groupby(level=0).ohlc()
+            2022                 2023
+            open high  low close open high  low close
+        CAC  2.3  4.5  1.0   1.0  9.0  9.4  1.0   1.0
+        SPX  1.2  8.9  1.2   2.0  3.4  8.8  3.4   8.2
+
+        For Resampler:
+
+        >>> ser = pd.Series([1, 3, 2, 4, 3, 5],
+        ...                 index=pd.DatetimeIndex(['2023-01-01',
+        ...                                         '2023-01-10',
+        ...                                         '2023-01-15',
+        ...                                         '2023-02-01',
+        ...                                         '2023-02-10',
+        ...                                         '2023-02-15']))
+        >>> ser.resample('MS').ohlc()
+                    open  high  low  close
+        2023-01-01     1     3    1      2
+        2023-02-01     4     5    3      5
+        """
+        if self.obj.ndim == 1:
+            obj = self._selected_obj
+
+            is_numeric = is_numeric_dtype(obj.dtype)
+            if not is_numeric:
+                raise DataError("No numeric types to aggregate")
+
+            res_values = self._grouper._cython_operation(
+                "aggregate", obj._values, "ohlc", axis=0, min_count=-1
+            )
+
+            agg_names = ["open", "high", "low", "close"]
+            result = self.obj._constructor_expanddim(
+                res_values, index=self._grouper.result_index, columns=agg_names
+            )
+            return self._reindex_output(result)
+
+        result = self._apply_to_column_groupbys(lambda sgb: sgb.ohlc())
+        return result
+
+    @doc(DataFrame.describe)
+    def describe(
+        self,
+        percentiles=None,
+        include=None,
+        exclude=None,
+    ) -> NDFrameT:
+        obj = self._obj_with_exclusions
+
+        if len(obj) == 0:
+            described = obj.describe(
+                percentiles=percentiles, include=include, exclude=exclude
+            )
+            if obj.ndim == 1:
+                result = described
+            else:
+                result = described.unstack()
+            return result.to_frame().T.iloc[:0]
+
+        with com.temp_setattr(self, "as_index", True):
+            result = self._python_apply_general(
+                lambda x: x.describe(
+                    percentiles=percentiles, include=include, exclude=exclude
+                ),
+                obj,
+                not_indexed_same=True,
+            )
+        if self.axis == 1:
+            return result.T
+
+        # GH#49256 - properly handle the grouping column(s)
+        result = result.unstack()
+        if not self.as_index:
+            result = self._insert_inaxis_grouper(result)
+            result.index = default_index(len(result))
+
+        return result
+
+    @final
+    def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resampler:
+        """
+        Provide resampling when using a TimeGrouper.
+
+        Given a grouper, the function resamples it according to the given
+        frequency string or DateOffset (the ``rule``).
+
+        See the :ref:`frequency aliases <timeseries.offset_aliases>`
+        documentation for more details.
+
+        Parameters
+        ----------
+        rule : str or DateOffset
+            The offset string or object representing target grouper conversion.
+        *args
+            Possible arguments are `how`, `fill_method`, `limit`, `kind` and
+            `on`, and other arguments of `TimeGrouper`.
+        include_groups : bool, default True
+            When True, will attempt to include the groupings in the operation in
+            the case that they are columns of the DataFrame. If this raises a
+            TypeError, the result will be computed with the groupings excluded.
+            When False, the groupings will be excluded when applying ``func``.
+
+            .. versionadded:: 2.2.0
+
+            .. deprecated:: 2.2.0
+
+               Setting include_groups to True is deprecated. Only the value
+               False will be allowed in a future version of pandas.
+
+        **kwargs
+            Possible arguments are `how`, `fill_method`, `limit`, `kind` and
+            `on`, and other arguments of `TimeGrouper`.
+
+        Returns
+        -------
+        pandas.api.typing.DatetimeIndexResamplerGroupby,
+        pandas.api.typing.PeriodIndexResamplerGroupby, or
+        pandas.api.typing.TimedeltaIndexResamplerGroupby
+            Return a new groupby object, with type depending on the data
+            being resampled.
+
+        See Also
+        --------
+        Grouper : Specify a frequency to resample with when
+            grouping by a key.
+        DatetimeIndex.resample : Frequency conversion and resampling of
+            time series.
+
+        Examples
+        --------
+        >>> idx = pd.date_range('1/1/2000', periods=4, freq='min')
+        >>> df = pd.DataFrame(data=4 * [range(2)],
+        ...                   index=idx,
+        ...                   columns=['a', 'b'])
+        >>> df.iloc[2, 0] = 5
+        >>> df
+                             a  b
+        2000-01-01 00:00:00  0  1
+        2000-01-01 00:01:00  0  1
+        2000-01-01 00:02:00  5  1
+        2000-01-01 00:03:00  0  1
+
+        Downsample the DataFrame into 3 minute bins and sum the values of
+        the timestamps falling into a bin.
+
+        >>> df.groupby('a').resample('3min', include_groups=False).sum()
+                                 b
+        a
+        0 2000-01-01 00:00:00  2
+          2000-01-01 00:03:00  1
+        5 2000-01-01 00:00:00  1
+
+        Upsample the series into 30 second bins.
+
+        >>> df.groupby('a').resample('30s', include_groups=False).sum()
+                                 b
+        a
+        0 2000-01-01 00:00:00  1
+          2000-01-01 00:00:30  0
+          2000-01-01 00:01:00  1
+          2000-01-01 00:01:30  0
+          2000-01-01 00:02:00  0
+          2000-01-01 00:02:30  0
+          2000-01-01 00:03:00  1
+        5 2000-01-01 00:02:00  1
+
+        Resample by month. Values are assigned to the month of the period.
+
+        >>> df.groupby('a').resample('ME', include_groups=False).sum()
+                      b
+        a
+        0 2000-01-31  3
+        5 2000-01-31  1
+
+        Downsample the series into 3 minute bins as above, but close the right
+        side of the bin interval.
+
+        >>> (
+        ...     df.groupby('a')
+        ...     .resample('3min', closed='right', include_groups=False)
+        ...     .sum()
+        ... )
+                                 b
+        a
+        0 1999-12-31 23:57:00  1
+          2000-01-01 00:00:00  2
+        5 2000-01-01 00:00:00  1
+
+        Downsample the series into 3 minute bins and close the right side of
+        the bin interval, but label each bin using the right edge instead of
+        the left.
+
+        >>> (
+        ...     df.groupby('a')
+        ...     .resample('3min', closed='right', label='right', include_groups=False)
+        ...     .sum()
+        ... )
+                                 b
+        a
+        0 2000-01-01 00:00:00  1
+          2000-01-01 00:03:00  2
+        5 2000-01-01 00:03:00  1
+        """
+        from pandas.core.resample import get_resampler_for_grouping
+
+        # mypy flags that include_groups could be specified via `*args` or `**kwargs`
+        # GH#54961 would resolve.
+        return get_resampler_for_grouping(  # type: ignore[misc]
+            self, rule, *args, include_groups=include_groups, **kwargs
+        )
+
+    @final
+    def rolling(self, *args, **kwargs) -> RollingGroupby:
+        """
+        Return a rolling grouper, providing rolling functionality per group.
+
+        Parameters
+        ----------
+        window : int, timedelta, str, offset, or BaseIndexer subclass
+            Size of the moving window.
+
+            If an integer, the fixed number of observations used for
+            each window.
+
+            If a timedelta, str, or offset, the time period of each window. Each
+            window will be variable sized based on the observations included in
+            the time-period. This is only valid for datetimelike indexes.
+            To learn more about the offsets & frequency strings, please see
+            `this link
+            <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
+
+            If a BaseIndexer subclass, the window boundaries are
+            based on the defined ``get_window_bounds`` method. Additional rolling
+            keyword arguments, namely ``min_periods``, ``center``, ``closed`` and
+            ``step`` will be passed to ``get_window_bounds``.
+
+        min_periods : int, default None
+            Minimum number of observations in window required to have a value;
+            otherwise, result is ``np.nan``.
+
+            For a window that is specified by an offset,
+            ``min_periods`` will default to 1.
+
+            For a window that is specified by an integer, ``min_periods`` will default
+            to the size of the window.
+
+        center : bool, default False
+            If False, set the window labels as the right edge of the window index.
+
+            If True, set the window labels as the center of the window index.
+
+        win_type : str, default None
+            If ``None``, all points are evenly weighted.
+
+            If a string, it must be a valid `scipy.signal window function
+            <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
+
+            Certain Scipy window types require additional parameters to be passed
+            in the aggregation function. The additional parameters must match
+            the keywords specified in the Scipy window type method signature.
+
+        on : str, optional
+            For a DataFrame, a column label or Index level on which
+            to calculate the rolling window, rather than the DataFrame's index.
+
+            A provided integer column is ignored and excluded from the result,
+            since an integer index is not used to calculate the rolling window.
+
+        axis : int or str, default 0
+            If ``0`` or ``'index'``, roll across the rows.
+
+            If ``1`` or ``'columns'``, roll across the columns.
+
+            For `Series` this parameter is unused and defaults to 0.
+
+        closed : str, default None
+            If ``'right'``, the first point in the window is excluded from calculations.
+
+            If ``'left'``, the last point in the window is excluded from calculations.
+
+            If ``'both'``, no points in the window are excluded from calculations.
+
+            If ``'neither'``, the first and last points in the window are excluded
+            from calculations.
+
+            Default ``None`` (``'right'``).
+
+        method : str {'single', 'table'}, default 'single'
+            Execute the rolling operation per single column or row (``'single'``)
+            or over the entire object (``'table'``).
+
+            This argument is only implemented when specifying ``engine='numba'``
+            in the method call.
+
+        Returns
+        -------
+        pandas.api.typing.RollingGroupby
+            Return a new grouper with our rolling appended.
+
+        See Also
+        --------
+        Series.rolling : Calling object with Series data.
+        DataFrame.rolling : Calling object with DataFrames.
+        Series.groupby : Apply a function groupby to a Series.
+        DataFrame.groupby : Apply a function groupby.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': [1, 1, 2, 2],
+        ...                    'B': [1, 2, 3, 4],
+        ...                    'C': [0.362, 0.227, 1.267, -0.562]})
+        >>> df
+           A  B      C
+        0  1  1  0.362
+        1  1  2  0.227
+        2  2  3  1.267
+        3  2  4 -0.562
+
+        >>> df.groupby('A').rolling(2).sum()
+               B      C
+        A
+        1 0  NaN    NaN
+          1  3.0  0.589
+        2 2  NaN    NaN
+          3  7.0  0.705
+
+        >>> df.groupby('A').rolling(2, min_periods=1).sum()
+               B      C
+        A
+        1 0  1.0  0.362
+          1  3.0  0.589
+        2 2  3.0  1.267
+          3  7.0  0.705
+
+        >>> df.groupby('A').rolling(2, on='B').sum()
+             B      C
+        A
+        1 0  1    NaN
+          1  2  0.589
+        2 2  3    NaN
+          3  4  0.705
+        """
+        from pandas.core.window import RollingGroupby
+
+        return RollingGroupby(
+            self._selected_obj,
+            *args,
+            _grouper=self._grouper,
+            _as_index=self.as_index,
+            **kwargs,
+        )
+
+    @final
+    @Substitution(name="groupby")
+    @Appender(_common_see_also)
+    def expanding(self, *args, **kwargs) -> ExpandingGroupby:
+        """
+        Return an expanding grouper, providing expanding
+        functionality per group.
+
+        Returns
+        -------
+        pandas.api.typing.ExpandingGroupby
+        """
+        from pandas.core.window import ExpandingGroupby
+
+        return ExpandingGroupby(
+            self._selected_obj,
+            *args,
+            _grouper=self._grouper,
+            **kwargs,
+        )
+
+    @final
+    @Substitution(name="groupby")
+    @Appender(_common_see_also)
+    def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby:
+        """
+        Return an ewm grouper, providing ewm functionality per group.
+
+        Returns
+        -------
+        pandas.api.typing.ExponentialMovingWindowGroupby
+        """
+        from pandas.core.window import ExponentialMovingWindowGroupby
+
+        return ExponentialMovingWindowGroupby(
+            self._selected_obj,
+            *args,
+            _grouper=self._grouper,
+            **kwargs,
+        )
+
+    @final
+    def _fill(self, direction: Literal["ffill", "bfill"], limit: int | None = None):
+        """
+        Shared function for `pad` and `backfill` to call Cython method.
+
+        Parameters
+        ----------
+        direction : {'ffill', 'bfill'}
+            Direction passed to underlying Cython function. `bfill` will cause
+            values to be filled backwards. `ffill` and any other values will
+            default to a forward fill.
+        limit : int, default None
+            Maximum number of consecutive values to fill. If `None`, this
+            method will convert to -1 prior to passing to Cython.
+
+        Returns
+        -------
+        `Series` or `DataFrame` with filled values
+
+        See Also
+        --------
+        pad : Forward fill the missing values in the dataset.
+        backfill : Backward fill the missing values in the dataset.
+        """
+        # Need int value for Cython
+        if limit is None:
+            limit = -1
+
+        ids, _, _ = self._grouper.group_info
+        sorted_labels = np.argsort(ids, kind="mergesort").astype(np.intp, copy=False)
+        if direction == "bfill":
+            sorted_labels = sorted_labels[::-1]
+
+        col_func = partial(
+            libgroupby.group_fillna_indexer,
+            labels=ids,
+            sorted_labels=sorted_labels,
+            limit=limit,
+            dropna=self.dropna,
+        )
+
+        def blk_func(values: ArrayLike) -> ArrayLike:
+            mask = isna(values)
+            if values.ndim == 1:
+                indexer = np.empty(values.shape, dtype=np.intp)
+                col_func(out=indexer, mask=mask)
+                return algorithms.take_nd(values, indexer)
+
+            else:
+                # We broadcast algorithms.take_nd analogous to
+                # np.take_along_axis
+                if isinstance(values, np.ndarray):
+                    dtype = values.dtype
+                    if self._grouper.has_dropped_na:
+                        # dropped null groups give rise to nan in the result
+                        dtype = ensure_dtype_can_hold_na(values.dtype)
+                    out = np.empty(values.shape, dtype=dtype)
+                else:
+                    # Note: we only get here with backfill/pad,
+                    # so if we have a dtype that cannot hold NAs,
+                    # then there will be no -1s in indexer, so we can use
+                    # the original dtype (no need to ensure_dtype_can_hold_na)
+                    out = type(values)._empty(values.shape, dtype=values.dtype)
+
+                for i, value_element in enumerate(values):
+                    # call group_fillna_indexer column-wise
+                    indexer = np.empty(values.shape[1], dtype=np.intp)
+                    col_func(out=indexer, mask=mask[i])
+                    out[i, :] = algorithms.take_nd(value_element, indexer)
+                return out
+
+        mgr = self._get_data_to_aggregate()
+        res_mgr = mgr.apply(blk_func)
+
+        new_obj = self._wrap_agged_manager(res_mgr)
+
+        if self.axis == 1:
+            # Only relevant for DataFrameGroupBy
+            new_obj = new_obj.T
+            new_obj.columns = self.obj.columns
+
+        new_obj.index = self.obj.index
+        return new_obj
+
+    @final
+    @Substitution(name="groupby")
+    def ffill(self, limit: int | None = None):
+        """
+        Forward fill the values.
+
+        Parameters
+        ----------
+        limit : int, optional
+            Limit of how many values to fill.
+
+        Returns
+        -------
+        Series or DataFrame
+            Object with missing values filled.
+
+        See Also
+        --------
+        Series.ffill : Forward fill the missing values in the dataset.
+        DataFrame.ffill : Forward fill the missing values in the dataset.
+        Series.fillna : Fill NaN values of a Series.
+        DataFrame.fillna : Fill NaN values of a DataFrame.
+
+        Examples
+        --------
+
+        For SeriesGroupBy:
+
+        >>> key = [0, 0, 1, 1]
+        >>> ser = pd.Series([np.nan, 2, 3, np.nan], index=key)
+        >>> ser
+        0    NaN
+        0    2.0
+        1    3.0
+        1    NaN
+        dtype: float64
+        >>> ser.groupby(level=0).ffill()
+        0    NaN
+        0    2.0
+        1    3.0
+        1    3.0
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> df = pd.DataFrame(
+        ...     {
+        ...         "key": [0, 0, 1, 1, 1],
+        ...         "A": [np.nan, 2, np.nan, 3, np.nan],
+        ...         "B": [2, 3, np.nan, np.nan, np.nan],
+        ...         "C": [np.nan, np.nan, 2, np.nan, np.nan],
+        ...     }
+        ... )
+        >>> df
+           key    A    B    C
+        0    0  NaN  2.0  NaN
+        1    0  2.0  3.0  NaN
+        2    1  NaN  NaN  2.0
+        3    1  3.0  NaN  NaN
+        4    1  NaN  NaN  NaN
+
+        Propagate non-null values forward within each group along columns.
+
+        >>> df.groupby("key").ffill()
+             A    B    C
+        0  NaN  2.0  NaN
+        1  2.0  3.0  NaN
+        2  NaN  NaN  2.0
+        3  3.0  NaN  2.0
+        4  3.0  NaN  2.0
+
+        Propagate non-null values forward within each group along rows.
+
+        >>> df.T.groupby(np.array([0, 0, 1, 1])).ffill().T
+           key    A    B    C
+        0  0.0  0.0  2.0  2.0
+        1  0.0  2.0  3.0  3.0
+        2  1.0  1.0  NaN  2.0
+        3  1.0  3.0  NaN  NaN
+        4  1.0  1.0  NaN  NaN
+
+        Only replace the first NaN element within a group along rows.
+
+        >>> df.groupby("key").ffill(limit=1)
+             A    B    C
+        0  NaN  2.0  NaN
+        1  2.0  3.0  NaN
+        2  NaN  NaN  2.0
+        3  3.0  NaN  2.0
+        4  3.0  NaN  NaN
+        """
+        return self._fill("ffill", limit=limit)
+
+    @final
+    @Substitution(name="groupby")
+    def bfill(self, limit: int | None = None):
+        """
+        Backward fill the values.
+
+        Parameters
+        ----------
+        limit : int, optional
+            Limit of how many values to fill.
+
+        Returns
+        -------
+        Series or DataFrame
+            Object with missing values filled.
+
+        See Also
+        --------
+        Series.bfill : Backward fill the missing values in the dataset.
+        DataFrame.bfill : Backward fill the missing values in the dataset.
+        Series.fillna : Fill NaN values of a Series.
+        DataFrame.fillna : Fill NaN values of a DataFrame.
+
+        Examples
+        --------
+
+        With Series:
+
+        >>> index = ['Falcon', 'Falcon', 'Parrot', 'Parrot', 'Parrot']
+        >>> s = pd.Series([None, 1, None, None, 3], index=index)
+        >>> s
+        Falcon    NaN
+        Falcon    1.0
+        Parrot    NaN
+        Parrot    NaN
+        Parrot    3.0
+        dtype: float64
+        >>> s.groupby(level=0).bfill()
+        Falcon    1.0
+        Falcon    1.0
+        Parrot    3.0
+        Parrot    3.0
+        Parrot    3.0
+        dtype: float64
+        >>> s.groupby(level=0).bfill(limit=1)
+        Falcon    1.0
+        Falcon    1.0
+        Parrot    NaN
+        Parrot    3.0
+        Parrot    3.0
+        dtype: float64
+
+        With DataFrame:
+
+        >>> df = pd.DataFrame({'A': [1, None, None, None, 4],
+        ...                    'B': [None, None, 5, None, 7]}, index=index)
+        >>> df
+                  A    B
+        Falcon  1.0  NaN
+        Falcon  NaN  NaN
+        Parrot  NaN  5.0
+        Parrot  NaN  NaN
+        Parrot  4.0  7.0
+        >>> df.groupby(level=0).bfill()
+                  A    B
+        Falcon  1.0  NaN
+        Falcon  NaN  NaN
+        Parrot  4.0  5.0
+        Parrot  4.0  7.0
+        Parrot  4.0  7.0
+        >>> df.groupby(level=0).bfill(limit=1)
+                  A    B
+        Falcon  1.0  NaN
+        Falcon  NaN  NaN
+        Parrot  NaN  5.0
+        Parrot  4.0  7.0
+        Parrot  4.0  7.0
+        """
+        return self._fill("bfill", limit=limit)
+
+    @final
+    @property
+    @Substitution(name="groupby")
+    @Substitution(see_also=_common_see_also)
+    def nth(self) -> GroupByNthSelector:
+        """
+        Take the nth row from each group if n is an int, otherwise a subset of rows.
+
+        Can be either a call or an index. dropna is not available with index notation.
+        Index notation accepts a comma-separated list of integers and slices.
+ + If dropna, will take the nth non-null row, dropna is either + 'all' or 'any'; this is equivalent to calling dropna(how=dropna) + before the groupby. + + Parameters + ---------- + n : int, slice or list of ints and slices + A single nth value for the row or a list of nth values or slices. + + .. versionchanged:: 1.4.0 + Added slice and lists containing slices. + Added index notation. + + dropna : {'any', 'all', None}, default None + Apply the specified dropna operation before counting which row is + the nth row. Only supported if n is an int. + + Returns + ------- + Series or DataFrame + N-th value within each group. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], + ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B']) + >>> g = df.groupby('A') + >>> g.nth(0) + A B + 0 1 NaN + 2 2 3.0 + >>> g.nth(1) + A B + 1 1 2.0 + 4 2 5.0 + >>> g.nth(-1) + A B + 3 1 4.0 + 4 2 5.0 + >>> g.nth([0, 1]) + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + 4 2 5.0 + >>> g.nth(slice(None, -1)) + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + + Index notation may also be used + + >>> g.nth[0, 1] + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + 4 2 5.0 + >>> g.nth[:-1] + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + + Specifying `dropna` allows ignoring ``NaN`` values + + >>> g.nth(0, dropna='any') + A B + 1 1 2.0 + 2 2 3.0 + + When the specified ``n`` is larger than any of the groups, an + empty DataFrame is returned + + >>> g.nth(3, dropna='any') + Empty DataFrame + Columns: [A, B] + Index: [] + """ + return GroupByNthSelector(self) + + def _nth( + self, + n: PositionalIndexer | tuple, + dropna: Literal["any", "all", None] = None, + ) -> NDFrameT: + if not dropna: + mask = self._make_mask_from_positional_indexer(n) + + ids, _, _ = self._grouper.group_info + + # Drop NA values in grouping + mask = mask & (ids != -1) + + out = self._mask_selected_obj(mask) + return out + + # dropna is truthy + if not is_integer(n): + raise ValueError("dropna option only supported for an integer argument") + + if dropna not in ["any", "all"]: + # Note: when agg-ing picker doesn't raise this, just returns NaN + raise ValueError( + "For a DataFrame or Series groupby.nth, dropna must be " + "either None, 'any' or 'all', " + f"(was passed {dropna})." + ) + + # old behaviour, but with all and any support for DataFrames. + # modified in GH 7559 to have better perf + n = cast(int, n) + dropped = self._selected_obj.dropna(how=dropna, axis=self.axis) + + # get a new grouper for our dropped obj + grouper: np.ndarray | Index | ops.BaseGrouper + if len(dropped) == len(self._selected_obj): + # Nothing was dropped, can use the same grouper + grouper = self._grouper + else: + # we don't have the grouper info available + # (e.g. 
we have selected out + # a column that is not in the current object) + axis = self._grouper.axis + grouper = self._grouper.codes_info[axis.isin(dropped.index)] + if self._grouper.has_dropped_na: + # Null groups need to still be encoded as -1 when passed to groupby + nulls = grouper == -1 + # error: No overload variant of "where" matches argument types + # "Any", "NAType", "Any" + values = np.where(nulls, NA, grouper) # type: ignore[call-overload] + grouper = Index(values, dtype="Int64") + + if self.axis == 1: + grb = dropped.T.groupby(grouper, as_index=self.as_index, sort=self.sort) + else: + grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort) + return grb.nth(n) + + @final + def quantile( + self, + q: float | AnyArrayLike = 0.5, + interpolation: str = "linear", + numeric_only: bool = False, + ): + """ + Return group values at the given quantile, a la numpy.percentile. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + Value(s) between 0 and 1 providing the quantile(s) to compute. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + Method to use when the desired quantile falls between two points. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Return type determined by caller of GroupBy object. + + See Also + -------- + Series.quantile : Similar method for Series. + DataFrame.quantile : Similar method for DataFrame. + numpy.percentile : NumPy method to compute qth percentile. + + Examples + -------- + >>> df = pd.DataFrame([ + ... ['a', 1], ['a', 2], ['a', 3], + ... ['b', 1], ['b', 3], ['b', 5] + ... ], columns=['key', 'val']) + >>> df.groupby('key').quantile() + val + key + a 2.0 + b 3.0 + """ + mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name="quantile") + obj = self._wrap_agged_manager(mgr) + if self.axis == 1: + splitter = self._grouper._get_splitter(obj.T, axis=self.axis) + sdata = splitter._sorted_data.T + else: + splitter = self._grouper._get_splitter(obj, axis=self.axis) + sdata = splitter._sorted_data + + starts, ends = lib.generate_slices(splitter._slabels, splitter.ngroups) + + def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]: + if is_object_dtype(vals.dtype): + raise TypeError( + "'quantile' cannot be performed against 'object' dtypes!" + ) + + inference: DtypeObj | None = None + if isinstance(vals, BaseMaskedArray) and is_numeric_dtype(vals.dtype): + out = vals.to_numpy(dtype=float, na_value=np.nan) + inference = vals.dtype + elif is_integer_dtype(vals.dtype): + if isinstance(vals, ExtensionArray): + out = vals.to_numpy(dtype=float, na_value=np.nan) + else: + out = vals + inference = np.dtype(np.int64) + elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray): + out = vals.to_numpy(dtype=float, na_value=np.nan) + elif is_bool_dtype(vals.dtype): + # GH#51424 deprecate to match Series/DataFrame behavior + warnings.warn( + f"Allowing bool dtype in {type(self).__name__}.quantile is " + "deprecated and will raise in a future version, matching " + "the Series/DataFrame behavior. Cast to uint8 dtype before " + "calling quantile instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + out = np.asarray(vals) + elif needs_i8_conversion(vals.dtype): + inference = vals.dtype + # In this case we need to delay the casting until after the + # np.lexsort below. 
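A small sketch of the quantile API documented above, exercising a list of quantiles and an explicit interpolation on a nullable integer column (the data and names are illustrative; the pre-processing step above is what casts such columns to float before the cython kernel runs):

import pandas as pd

# Multiple quantiles per group with an explicit interpolation rule; the
# result is indexed by (group key, quantile).
df = pd.DataFrame(
    {
        "key": ["a", "a", "a", "b", "b"],
        "val": pd.array([1, 2, 3, 4, 5], dtype="Int64"),
    }
)
res = df.groupby("key")["val"].quantile(q=[0.25, 0.75], interpolation="lower")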
+ # error: Incompatible return value type (got + # "Tuple[Union[ExtensionArray, ndarray[Any, Any]], Union[Any, + # ExtensionDtype]]", expected "Tuple[ndarray[Any, Any], + # Optional[Union[dtype[Any], ExtensionDtype]]]") + return vals, inference # type: ignore[return-value] + elif isinstance(vals, ExtensionArray) and is_float_dtype(vals.dtype): + inference = np.dtype(np.float64) + out = vals.to_numpy(dtype=float, na_value=np.nan) + else: + out = np.asarray(vals) + + return out, inference + + def post_processor( + vals: np.ndarray, + inference: DtypeObj | None, + result_mask: np.ndarray | None, + orig_vals: ArrayLike, + ) -> ArrayLike: + if inference: + # Check for edge case + if isinstance(orig_vals, BaseMaskedArray): + assert result_mask is not None # for mypy + + if interpolation in {"linear", "midpoint"} and not is_float_dtype( + orig_vals + ): + return FloatingArray(vals, result_mask) + else: + # Item "ExtensionDtype" of "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" has no attribute "numpy_dtype" + # [union-attr] + with warnings.catch_warnings(): + # vals.astype with nan can warn with numpy >1.24 + warnings.filterwarnings("ignore", category=RuntimeWarning) + return type(orig_vals)( + vals.astype( + inference.numpy_dtype # type: ignore[union-attr] + ), + result_mask, + ) + + elif not ( + is_integer_dtype(inference) + and interpolation in {"linear", "midpoint"} + ): + if needs_i8_conversion(inference): + # error: Item "ExtensionArray" of "Union[ExtensionArray, + # ndarray[Any, Any]]" has no attribute "_ndarray" + vals = vals.astype("i8").view( + orig_vals._ndarray.dtype # type: ignore[union-attr] + ) + # error: Item "ExtensionArray" of "Union[ExtensionArray, + # ndarray[Any, Any]]" has no attribute "_from_backing_data" + return orig_vals._from_backing_data( # type: ignore[union-attr] + vals + ) + + assert isinstance(inference, np.dtype) # for mypy + return vals.astype(inference) + + return vals + + qs = np.array(q, dtype=np.float64) + pass_qs: np.ndarray | None = qs + if is_scalar(q): + qs = np.array([q], dtype=np.float64) + pass_qs = None + + ids, _, ngroups = self._grouper.group_info + nqs = len(qs) + + func = partial( + libgroupby.group_quantile, + labels=ids, + qs=qs, + interpolation=interpolation, + starts=starts, + ends=ends, + ) + + def blk_func(values: ArrayLike) -> ArrayLike: + orig_vals = values + if isinstance(values, BaseMaskedArray): + mask = values._mask + result_mask = np.zeros((ngroups, nqs), dtype=np.bool_) + else: + mask = isna(values) + result_mask = None + + is_datetimelike = needs_i8_conversion(values.dtype) + + vals, inference = pre_processor(values) + + ncols = 1 + if vals.ndim == 2: + ncols = vals.shape[0] + + out = np.empty((ncols, ngroups, nqs), dtype=np.float64) + + if is_datetimelike: + vals = vals.view("i8") + + if vals.ndim == 1: + # EA is always 1d + func( + out[0], + values=vals, + mask=mask, + result_mask=result_mask, + is_datetimelike=is_datetimelike, + ) + else: + for i in range(ncols): + func( + out[i], + values=vals[i], + mask=mask[i], + result_mask=None, + is_datetimelike=is_datetimelike, + ) + + if vals.ndim == 1: + out = out.ravel("K") + if result_mask is not None: + result_mask = result_mask.ravel("K") + else: + out = out.reshape(ncols, ngroups * nqs) + + return post_processor(out, inference, result_mask, orig_vals) + + res_mgr = sdata._mgr.grouped_reduce(blk_func) + + res = self._wrap_agged_manager(res_mgr) + return self._wrap_aggregated_output(res, qs=pass_qs) + + @final + @Substitution(name="groupby") + def ngroup(self, ascending: 
bool = True): + """ + Number each group from 0 to the number of groups - 1. + + This is the enumerative complement of cumcount. Note that the + numbers given to the groups match the order in which the groups + would be seen when iterating over the groupby object, not the + order they are first observed. + + Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN` + and will be skipped from the count. + + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from number of group - 1 to 0. + + Returns + ------- + Series + Unique numbers for each group. + + See Also + -------- + .cumcount : Number the rows in each group. + + Examples + -------- + >>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]}) + >>> df + color + 0 red + 1 None + 2 red + 3 blue + 4 blue + 5 red + >>> df.groupby("color").ngroup() + 0 1.0 + 1 NaN + 2 1.0 + 3 0.0 + 4 0.0 + 5 1.0 + dtype: float64 + >>> df.groupby("color", dropna=False).ngroup() + 0 1 + 1 2 + 2 1 + 3 0 + 4 0 + 5 1 + dtype: int64 + >>> df.groupby("color", dropna=False).ngroup(ascending=False) + 0 1 + 1 0 + 2 1 + 3 2 + 4 2 + 5 1 + dtype: int64 + """ + obj = self._obj_with_exclusions + index = obj._get_axis(self.axis) + comp_ids = self._grouper.group_info[0] + + dtype: type + if self._grouper.has_dropped_na: + comp_ids = np.where(comp_ids == -1, np.nan, comp_ids) + dtype = np.float64 + else: + dtype = np.int64 + + if any(ping._passed_categorical for ping in self._grouper.groupings): + # comp_ids reflect non-observed groups, we need only observed + comp_ids = rank_1d(comp_ids, ties_method="dense") - 1 + + result = self._obj_1d_constructor(comp_ids, index, dtype=dtype) + if not ascending: + result = self.ngroups - 1 - result + return result + + @final + @Substitution(name="groupby") + def cumcount(self, ascending: bool = True): + """ + Number each item in each group from 0 to the length of that group - 1. + + Essentially this is equivalent to + + .. code-block:: python + + self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) + + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from length of group - 1 to 0. + + Returns + ------- + Series + Sequence number of each element within each group. + + See Also + -------- + .ngroup : Number the groups themselves. + + Examples + -------- + >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], + ... columns=['A']) + >>> df + A + 0 a + 1 a + 2 a + 3 b + 4 b + 5 a + >>> df.groupby('A').cumcount() + 0 0 + 1 1 + 2 2 + 3 0 + 4 1 + 5 3 + dtype: int64 + >>> df.groupby('A').cumcount(ascending=False) + 0 3 + 1 2 + 2 1 + 3 1 + 4 0 + 5 0 + dtype: int64 + """ + index = self._obj_with_exclusions._get_axis(self.axis) + cumcounts = self._cumcount_array(ascending=ascending) + return self._obj_1d_constructor(cumcounts, index) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def rank( + self, + method: str = "average", + ascending: bool = True, + na_option: str = "keep", + pct: bool = False, + axis: AxisInt | lib.NoDefault = lib.no_default, + ) -> NDFrameT: + """ + Provide the rank of values within each group. + + Parameters + ---------- + method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' + * average: average rank of group. + * min: lowest rank in group. + * max: highest rank in group. + * first: ranks assigned in order they appear in the array. + * dense: like 'min', but rank always increases by 1 between groups. 
+ ascending : bool, default True + False for ranks by high (1) to low (N). + na_option : {'keep', 'top', 'bottom'}, default 'keep' + * keep: leave NA values where they are. + * top: smallest rank if ascending. + * bottom: smallest rank if descending. + pct : bool, default False + Compute percentage rank of data within each group. + axis : int, default 0 + The axis of the object over which to compute the rank. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + Returns + ------- + DataFrame with ranking of values within each group + %(see_also)s + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "group": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], + ... "value": [2, 4, 2, 3, 5, 1, 2, 4, 1, 5], + ... } + ... ) + >>> df + group value + 0 a 2 + 1 a 4 + 2 a 2 + 3 a 3 + 4 a 5 + 5 b 1 + 6 b 2 + 7 b 4 + 8 b 1 + 9 b 5 + >>> for method in ['average', 'min', 'max', 'dense', 'first']: + ... df[f'{method}_rank'] = df.groupby('group')['value'].rank(method) + >>> df + group value average_rank min_rank max_rank dense_rank first_rank + 0 a 2 1.5 1.0 2.0 1.0 1.0 + 1 a 4 4.0 4.0 4.0 3.0 4.0 + 2 a 2 1.5 1.0 2.0 1.0 2.0 + 3 a 3 3.0 3.0 3.0 2.0 3.0 + 4 a 5 5.0 5.0 5.0 4.0 5.0 + 5 b 1 1.5 1.0 2.0 1.0 1.0 + 6 b 2 3.0 3.0 3.0 2.0 3.0 + 7 b 4 4.0 4.0 4.0 3.0 4.0 + 8 b 1 1.5 1.0 2.0 1.0 2.0 + 9 b 5 5.0 5.0 5.0 4.0 5.0 + """ + if na_option not in {"keep", "top", "bottom"}: + msg = "na_option must be one of 'keep', 'top', or 'bottom'" + raise ValueError(msg) + + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "rank") + else: + axis = 0 + + kwargs = { + "ties_method": method, + "ascending": ascending, + "na_option": na_option, + "pct": pct, + } + if axis != 0: + # DataFrame uses different keyword name + kwargs["method"] = kwargs.pop("ties_method") + f = lambda x: x.rank(axis=axis, numeric_only=False, **kwargs) + result = self._python_apply_general( + f, self._selected_obj, is_transform=True + ) + return result + + return self._cython_transform( + "rank", + numeric_only=False, + axis=axis, + **kwargs, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cumprod( + self, axis: Axis | lib.NoDefault = lib.no_default, *args, **kwargs + ) -> NDFrameT: + """ + Cumulative product for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([6, 2, 0], index=lst) + >>> ser + a 6 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).cumprod() + a 6 + a 12 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 8 2 + horse 1 2 5 + bull 2 6 9 + >>> df.groupby("a").groups + {1: ['cow', 'horse'], 2: ['bull']} + >>> df.groupby("a").cumprod() + b c + cow 8 2 + horse 16 10 + bull 6 9 + """ + nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"]) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cumprod") + else: + axis = 0 + + if axis != 0: + f = lambda x: x.cumprod(axis=axis, **kwargs) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + return self._cython_transform("cumprod", **kwargs) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cumsum( + self, axis: Axis | lib.NoDefault = lib.no_default, *args, **kwargs + ) -> NDFrameT: + """ + Cumulative sum for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([6, 2, 0], index=lst) + >>> ser + a 6 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).cumsum() + a 6 + a 8 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["fox", "gorilla", "lion"]) + >>> df + a b c + fox 1 8 2 + gorilla 1 2 5 + lion 2 6 9 + >>> df.groupby("a").groups + {1: ['fox', 'gorilla'], 2: ['lion']} + >>> df.groupby("a").cumsum() + b c + fox 8 2 + gorilla 10 7 + lion 6 9 + """ + nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"]) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cumsum") + else: + axis = 0 + + if axis != 0: + f = lambda x: x.cumsum(axis=axis, **kwargs) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + return self._cython_transform("cumsum", **kwargs) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cummin( + self, + axis: AxisInt | lib.NoDefault = lib.no_default, + numeric_only: bool = False, + **kwargs, + ) -> NDFrameT: + """ + Cumulative min for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 6, 2, 3, 0, 4], index=lst) + >>> ser + a 1 + a 6 + a 2 + b 3 + b 0 + b 4 + dtype: int64 + >>> ser.groupby(level=0).cummin() + a 1 + a 1 + a 1 + b 3 + b 0 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 0, 2], [1, 1, 5], [6, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["snake", "rabbit", "turtle"]) + >>> df + a b c + snake 1 0 2 + rabbit 1 1 5 + turtle 6 6 9 + >>> df.groupby("a").groups + {1: ['snake', 'rabbit'], 6: ['turtle']} + >>> df.groupby("a").cummin() + b c + snake 0 2 + rabbit 0 2 + turtle 6 9 + """ + skipna = kwargs.get("skipna", True) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cummin") + else: + axis = 0 + + if axis != 0: + f = lambda x: np.minimum.accumulate(x, axis) + obj = self._selected_obj + if numeric_only: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) + + return self._cython_transform( + "cummin", numeric_only=numeric_only, skipna=skipna + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cummax( + self, + axis: AxisInt | lib.NoDefault = lib.no_default, + numeric_only: bool = False, + **kwargs, + ) -> NDFrameT: + """ + Cumulative max for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 6, 2, 3, 1, 4], index=lst) + >>> ser + a 1 + a 6 + a 2 + b 3 + b 1 + b 4 + dtype: int64 + >>> ser.groupby(level=0).cummax() + a 1 + a 6 + a 6 + b 3 + b 3 + b 4 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 1, 0], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 8 2 + horse 1 1 0 + bull 2 6 9 + >>> df.groupby("a").groups + {1: ['cow', 'horse'], 2: ['bull']} + >>> df.groupby("a").cummax() + b c + cow 8 2 + horse 8 2 + bull 6 9 + """ + skipna = kwargs.get("skipna", True) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cummax") + else: + axis = 0 + + if axis != 0: + f = lambda x: np.maximum.accumulate(x, axis) + obj = self._selected_obj + if numeric_only: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) + + return self._cython_transform( + "cummax", numeric_only=numeric_only, skipna=skipna + ) + + @final + @Substitution(name="groupby") + def shift( + self, + periods: int | Sequence[int] = 1, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + fill_value=lib.no_default, + suffix: str | None = None, + ): + """ + Shift each group by periods observations. + + If freq is passed, the index will be increased using the periods and the freq. + + Parameters + ---------- + periods : int | Sequence[int], default 1 + Number of periods to shift. If a list of values, shift each group by + each period. + freq : str, optional + Frequency string. + axis : axis to shift, default 0 + Shift direction. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + fill_value : optional + The scalar value to use for newly introduced missing values. + + .. versionchanged:: 2.1.0 + Will raise a ``ValueError`` if ``freq`` is provided too. + + suffix : str, optional + A string to add to each shifted column if there are multiple periods. + Ignored otherwise. + + Returns + ------- + Series or DataFrame + Object shifted within each group. + + See Also + -------- + Index.shift : Shift values of Index. 
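A brief sketch of the Sequence[int] form of ``periods`` described in the shift parameters above (example data is made up for illustration):

import pandas as pd

# One shifted column per period, suffixed "_1", "_2", ..., aligned to the
# original index.
df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1, 2, 3, 4]})
shifted = df.groupby("key")["val"].shift(periods=[1, 2])
# shifted has columns "val_1" and "val_2"; within each group the values are
# moved down by one and two rows respectively.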
+ + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).shift(1) + a NaN + a 1.0 + b NaN + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 2 3 + salmon 1 5 6 + catfish 2 5 8 + goldfish 2 6 9 + >>> df.groupby("a").shift(1) + b c + tuna NaN NaN + salmon 2.0 3.0 + catfish NaN NaN + goldfish 5.0 8.0 + """ + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "shift") + else: + axis = 0 + + if is_list_like(periods): + if axis == 1: + raise ValueError( + "If `periods` contains multiple shifts, `axis` cannot be 1." + ) + periods = cast(Sequence, periods) + if len(periods) == 0: + raise ValueError("If `periods` is an iterable, it cannot be empty.") + from pandas.core.reshape.concat import concat + + add_suffix = True + else: + if not is_integer(periods): + raise TypeError( + f"Periods must be integer, but {periods} is {type(periods)}." + ) + if suffix: + raise ValueError("Cannot specify `suffix` if `periods` is an int.") + periods = [cast(int, periods)] + add_suffix = False + + shifted_dataframes = [] + for period in periods: + if not is_integer(period): + raise TypeError( + f"Periods must be integer, but {period} is {type(period)}." + ) + period = cast(int, period) + if freq is not None or axis != 0: + f = lambda x: x.shift( + period, freq, axis, fill_value # pylint: disable=cell-var-from-loop + ) + shifted = self._python_apply_general( + f, self._selected_obj, is_transform=True + ) + else: + if fill_value is lib.no_default: + fill_value = None + ids, _, ngroups = self._grouper.group_info + res_indexer = np.zeros(len(ids), dtype=np.int64) + + libgroupby.group_shift_indexer(res_indexer, ids, ngroups, period) + + obj = self._obj_with_exclusions + + shifted = obj._reindex_with_indexers( + {self.axis: (obj.axes[self.axis], res_indexer)}, + fill_value=fill_value, + allow_dups=True, + ) + + if add_suffix: + if isinstance(shifted, Series): + shifted = cast(NDFrameT, shifted.to_frame()) + shifted = shifted.add_suffix( + f"{suffix}_{period}" if suffix else f"_{period}" + ) + shifted_dataframes.append(cast(Union[Series, DataFrame], shifted)) + + return ( + shifted_dataframes[0] + if len(shifted_dataframes) == 1 + else concat(shifted_dataframes, axis=1) + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def diff( + self, periods: int = 1, axis: AxisInt | lib.NoDefault = lib.no_default + ) -> NDFrameT: + """ + First discrete difference of element. + + Calculates the difference of each element compared with another + element in the group (default is element in previous row). + + Parameters + ---------- + periods : int, default 1 + Periods to shift for calculating difference, accepts negative values. + axis : axis to shift, default 0 + Take difference over rows (0) or columns (1). + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + Returns + ------- + Series or DataFrame + First differences. 
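The diff docstring above and the implementation further below compute first differences as the object minus its group-wise shift; a tiny sketch of that equivalence (the example Series is illustrative):

import pandas as pd

# groupby.diff(n) matches subtracting the group-wise shift by n.
ser = pd.Series([7, 2, 8, 4, 3, 3], index=["a", "a", "a", "b", "b", "b"])
gb = ser.groupby(level=0)
assert gb.diff().equals(ser - gb.shift(1))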
+ %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).diff() + a NaN + a -5.0 + a 6.0 + b NaN + b -1.0 + b 0.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).diff() + a b + dog NaN NaN + dog 2.0 3.0 + dog 2.0 4.0 + mouse NaN NaN + mouse 0.0 0.0 + mouse 1.0 -2.0 + mouse -5.0 -1.0 + """ + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "diff") + else: + axis = 0 + + if axis != 0: + return self.apply(lambda x: x.diff(periods=periods, axis=axis)) + + obj = self._obj_with_exclusions + shifted = self.shift(periods=periods) + + # GH45562 - to retain existing behavior and match behavior of Series.diff(), + # int8 and int16 are coerced to float32 rather than float64. + dtypes_to_f32 = ["int8", "int16"] + if obj.ndim == 1: + if obj.dtype in dtypes_to_f32: + shifted = shifted.astype("float32") + else: + to_coerce = [c for c, dtype in obj.dtypes.items() if dtype in dtypes_to_f32] + if len(to_coerce): + shifted = shifted.astype({c: "float32" for c in to_coerce}) + + return obj - shifted + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def pct_change( + self, + periods: int = 1, + fill_method: FillnaOptions | None | lib.NoDefault = lib.no_default, + limit: int | None | lib.NoDefault = lib.no_default, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + ): + """ + Calculate pct_change of each value to previous entry in group. + + Returns + ------- + Series or DataFrame + Percentage changes within each group. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).pct_change() + a NaN + a 1.000000 + b NaN + b 0.333333 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 2 3 + salmon 1 5 6 + catfish 2 5 8 + goldfish 2 6 9 + >>> df.groupby("a").pct_change() + b c + tuna NaN NaN + salmon 1.5 1.000 + catfish NaN NaN + goldfish 0.2 0.125 + """ + # GH#53491 + if fill_method not in (lib.no_default, None) or limit is not lib.no_default: + warnings.warn( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + f"{type(self).__name__}.pct_change are deprecated and will be removed " + "in a future version. Either fill in any non-leading NA values prior " + "to calling pct_change or specify 'fill_method=None' to not fill NA " + "values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if fill_method is lib.no_default: + if limit is lib.no_default and any( + grp.isna().values.any() for _, grp in self + ): + warnings.warn( + "The default fill_method='ffill' in " + f"{type(self).__name__}.pct_change is deprecated and will " + "be removed in a future version. 
Either fill in any " + "non-leading NA values prior to calling pct_change or " + "specify 'fill_method=None' to not fill NA values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + fill_method = "ffill" + if limit is lib.no_default: + limit = None + + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "pct_change") + else: + axis = 0 + + # TODO(GH#23918): Remove this conditional for SeriesGroupBy when + # GH#23918 is fixed + if freq is not None or axis != 0: + f = lambda x: x.pct_change( + periods=periods, + fill_method=fill_method, + limit=limit, + freq=freq, + axis=axis, + ) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + if fill_method is None: # GH30463 + fill_method = "ffill" + limit = 0 + filled = getattr(self, fill_method)(limit=limit) + if self.axis == 0: + fill_grp = filled.groupby(self._grouper.codes, group_keys=self.group_keys) + else: + fill_grp = filled.T.groupby(self._grouper.codes, group_keys=self.group_keys) + shifted = fill_grp.shift(periods=periods, freq=freq) + if self.axis == 1: + shifted = shifted.T + return (filled / shifted) - 1 + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def head(self, n: int = 5) -> NDFrameT: + """ + Return first n rows of each group. + + Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). + + Parameters + ---------- + n : int + If positive: number of entries to include from start of each group. + If negative: number of entries to exclude from end of each group. + + Returns + ------- + Series or DataFrame + Subset of original Series or DataFrame as determined by n. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], + ... columns=['A', 'B']) + >>> df.groupby('A').head(1) + A B + 0 1 2 + 2 5 6 + >>> df.groupby('A').head(-1) + A B + 0 1 2 + """ + mask = self._make_mask_from_positional_indexer(slice(None, n)) + return self._mask_selected_obj(mask) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def tail(self, n: int = 5) -> NDFrameT: + """ + Return last n rows of each group. + + Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). + + Parameters + ---------- + n : int + If positive: number of entries to include from end of each group. + If negative: number of entries to exclude from start of each group. + + Returns + ------- + Series or DataFrame + Subset of original Series or DataFrame as determined by n. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], + ... columns=['A', 'B']) + >>> df.groupby('A').tail(1) + A B + 1 a 2 + 3 b 2 + >>> df.groupby('A').tail(-1) + A B + 1 a 2 + 3 b 2 + """ + if n: + mask = self._make_mask_from_positional_indexer(slice(-n, None)) + else: + mask = self._make_mask_from_positional_indexer([]) + + return self._mask_selected_obj(mask) + + @final + def _mask_selected_obj(self, mask: npt.NDArray[np.bool_]) -> NDFrameT: + """ + Return _selected_obj with mask applied to the correct axis. + + Parameters + ---------- + mask : np.ndarray[bool] + Boolean mask to apply. + + Returns + ------- + Series or DataFrame + Filtered _selected_obj. 
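A short sketch contrasting the positional head/tail selection documented above with the aggregating first()/last() (the example frame is made up):

import pandas as pd

# head/tail return a subset of the original rows with index and order
# preserved (as_index is ignored); first()/last() aggregate to one row
# per group instead.
df = pd.DataFrame({"A": ["x", "y", "x", "y"], "B": [1, 2, 3, 4]})
subset = df.groupby("A").head(1)   # rows 0 and 1, original index kept
firsts = df.groupby("A").first()   # one row per group, indexed by "A"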
+ """ + ids = self._grouper.group_info[0] + mask = mask & (ids != -1) + + if self.axis == 0: + return self._selected_obj[mask] + else: + return self._selected_obj.iloc[:, mask] + + @final + def _reindex_output( + self, + output: OutputFrameOrSeries, + fill_value: Scalar = np.nan, + qs: npt.NDArray[np.float64] | None = None, + ) -> OutputFrameOrSeries: + """ + If we have categorical groupers, then we might want to make sure that + we have a fully re-indexed output to the levels. This means expanding + the output space to accommodate all values in the cartesian product of + our groups, regardless of whether they were observed in the data or + not. This will expand the output space if there are missing groups. + + The method returns early without modifying the input if the number of + groupings is less than 2, self.observed == True or none of the groupers + are categorical. + + Parameters + ---------- + output : Series or DataFrame + Object resulting from grouping and applying an operation. + fill_value : scalar, default np.nan + Value to use for unobserved categories if self.observed is False. + qs : np.ndarray[float64] or None, default None + quantile values, only relevant for quantile. + + Returns + ------- + Series or DataFrame + Object (potentially) re-indexed to include all possible groups. + """ + groupings = self._grouper.groupings + if len(groupings) == 1: + return output + + # if we only care about the observed values + # we are done + elif self.observed: + return output + + # reindexing only applies to a Categorical grouper + elif not any( + isinstance(ping.grouping_vector, (Categorical, CategoricalIndex)) + for ping in groupings + ): + return output + + levels_list = [ping._group_index for ping in groupings] + names = self._grouper.names + if qs is not None: + # error: Argument 1 to "append" of "list" has incompatible type + # "ndarray[Any, dtype[floating[_64Bit]]]"; expected "Index" + levels_list.append(qs) # type: ignore[arg-type] + names = names + [None] + index = MultiIndex.from_product(levels_list, names=names) + if self.sort: + index = index.sort_values() + + if self.as_index: + # Always holds for SeriesGroupBy unless GH#36507 is implemented + d = { + self.obj._get_axis_name(self.axis): index, + "copy": False, + "fill_value": fill_value, + } + return output.reindex(**d) # type: ignore[arg-type] + + # GH 13204 + # Here, the categorical in-axis groupers, which need to be fully + # expanded, are columns in `output`. An idea is to do: + # output = output.set_index(self._grouper.names) + # .reindex(index).reset_index() + # but special care has to be taken because of possible not-in-axis + # groupers. + # So, we manually select and drop the in-axis grouper columns, + # reindex `output`, and then reset the in-axis grouper columns. 
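The comment block above explains why results may be expanded to the full cartesian product of the groupers; a small sketch of the user-visible effect with ``observed=False`` (example data is made up):

import pandas as pd

# With categorical groupers and observed=False, the output is re-indexed to
# every combination of categories, including combinations never observed.
df = pd.DataFrame(
    {
        "cat1": pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"]),
        "cat2": pd.Categorical(["x", "y", "x"], categories=["x", "y"]),
        "val": [1, 2, 3],
    }
)
res = df.groupby(["cat1", "cat2"], observed=False)["val"].sum()
# res has 3 * 2 = 6 rows; unobserved pairs such as ("c", "x") are filled
# (sum fills them with 0).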
+ + # Select in-axis groupers + in_axis_grps = [ + (i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis + ] + if len(in_axis_grps) > 0: + g_nums, g_names = zip(*in_axis_grps) + output = output.drop(labels=list(g_names), axis=1) + + # Set a temp index and reindex (possibly expanding) + output = output.set_index(self._grouper.result_index).reindex( + index, copy=False, fill_value=fill_value + ) + + # Reset in-axis grouper columns + # (using level numbers `g_nums` because level names may not be unique) + if len(in_axis_grps) > 0: + output = output.reset_index(level=g_nums) + + return output.reset_index(drop=True) + + @final + def sample( + self, + n: int | None = None, + frac: float | None = None, + replace: bool = False, + weights: Sequence | Series | None = None, + random_state: RandomState | None = None, + ): + """ + Return a random sample of items from each group. + + You can use `random_state` for reproducibility. + + Parameters + ---------- + n : int, optional + Number of items to return for each group. Cannot be used with + `frac` and must be no larger than the smallest group unless + `replace` is True. Default is one if `frac` is None. + frac : float, optional + Fraction of items to return. Cannot be used with `n`. + replace : bool, default False + Allow or disallow sampling of the same row more than once. + weights : list-like, optional + Default None results in equal probability weighting. + If passed a list-like then values must have the same length as + the underlying DataFrame or Series object and will be used as + sampling probabilities after normalization within each group. + Values must be non-negative with at least one positive element + within each group. + random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional + If int, array-like, or BitGenerator, seed for random number generator. + If np.random.RandomState or np.random.Generator, use as given. + + .. versionchanged:: 1.4.0 + + np.random.Generator objects now accepted + + Returns + ------- + Series or DataFrame + A new object of same type as caller containing items randomly + sampled within each group from the caller object. + + See Also + -------- + DataFrame.sample: Generate random samples from a DataFrame object. + numpy.random.choice: Generate a random sample from a given 1-D numpy + array. + + Examples + -------- + >>> df = pd.DataFrame( + ... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)} + ... ) + >>> df + a b + 0 red 0 + 1 red 1 + 2 blue 2 + 3 blue 3 + 4 black 4 + 5 black 5 + + Select one row at random for each distinct value in column a. The + `random_state` argument can be used to guarantee reproducibility: + + >>> df.groupby("a").sample(n=1, random_state=1) + a b + 4 black 4 + 2 blue 2 + 1 red 1 + + Set `frac` to sample fixed proportions rather than counts: + + >>> df.groupby("a")["b"].sample(frac=0.5, random_state=2) + 5 5 + 2 2 + 0 0 + Name: b, dtype: int64 + + Control sample probabilities within groups by setting weights: + + >>> df.groupby("a").sample( + ... n=1, + ... weights=[1, 1, 1, 0, 0, 1], + ... random_state=1, + ... 
) + a b + 5 black 5 + 2 blue 2 + 0 red 0 + """ # noqa: E501 + if self._selected_obj.empty: + # GH48459 prevent ValueError when object is empty + return self._selected_obj + size = sample.process_sampling_size(n, frac, replace) + if weights is not None: + weights_arr = sample.preprocess_weights( + self._selected_obj, weights, axis=self.axis + ) + + random_state = com.random_state(random_state) + + group_iterator = self._grouper.get_iterator(self._selected_obj, self.axis) + + sampled_indices = [] + for labels, obj in group_iterator: + grp_indices = self.indices[labels] + group_size = len(grp_indices) + if size is not None: + sample_size = size + else: + assert frac is not None + sample_size = round(frac * group_size) + + grp_sample = sample.sample( + group_size, + size=sample_size, + replace=replace, + weights=None if weights is None else weights_arr[grp_indices], + random_state=random_state, + ) + sampled_indices.append(grp_indices[grp_sample]) + + sampled_indices = np.concatenate(sampled_indices) + return self._selected_obj.take(sampled_indices, axis=self.axis) + + def _idxmax_idxmin( + self, + how: Literal["idxmax", "idxmin"], + ignore_unobserved: bool = False, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> NDFrameT: + """Compute idxmax/idxmin. + + Parameters + ---------- + how : {'idxmin', 'idxmax'} + Whether to compute idxmin or idxmax. + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + numeric_only : bool, default False + Include only float, int, boolean columns. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + ignore_unobserved : bool, default False + When True and an unobserved group is encountered, do not raise. This used + for transform where unobserved groups do not play an impact on the result. + + Returns + ------- + Series or DataFrame + idxmax or idxmin for the groupby operation. + """ + if axis is not lib.no_default: + if axis is None: + axis = self.axis + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, how) + else: + axis = self.axis + + if not self.observed and any( + ping._passed_categorical for ping in self._grouper.groupings + ): + expected_len = np.prod( + [len(ping._group_index) for ping in self._grouper.groupings] + ) + if len(self._grouper.groupings) == 1: + result_len = len(self._grouper.groupings[0].grouping_vector.unique()) + else: + # result_index only contains observed groups in this case + result_len = len(self._grouper.result_index) + assert result_len <= expected_len + has_unobserved = result_len < expected_len + + raise_err: bool | np.bool_ = not ignore_unobserved and has_unobserved + # Only raise an error if there are columns to compute; otherwise we return + # an empty DataFrame with an index (possibly including unobserved) but no + # columns + data = self._obj_with_exclusions + if raise_err and isinstance(data, DataFrame): + if numeric_only: + data = data._get_numeric_data() + raise_err = len(data.columns) > 0 + + if raise_err: + raise ValueError( + f"Can't get {how} of an empty group due to unobserved categories. " + "Specify observed=True in groupby instead." + ) + elif not skipna: + if self._obj_with_exclusions.isna().any(axis=None): + warnings.warn( + f"The behavior of {type(self).__name__}.{how} with all-NA " + "values, or any-NA and skipna=False, is deprecated. 
In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if axis == 1: + try: + + def func(df): + method = getattr(df, how) + return method(axis=axis, skipna=skipna, numeric_only=numeric_only) + + func.__name__ = how + result = self._python_apply_general( + func, self._obj_with_exclusions, not_indexed_same=True + ) + except ValueError as err: + name = "argmax" if how == "idxmax" else "argmin" + if f"attempt to get {name} of an empty sequence" in str(err): + raise ValueError( + f"Can't get {how} of an empty group due to unobserved " + "categories. Specify observed=True in groupby instead." + ) from None + raise + return result + + result = self._agg_general( + numeric_only=numeric_only, + min_count=1, + alias=how, + skipna=skipna, + ) + return result + + def _wrap_idxmax_idxmin(self, res: NDFrameT) -> NDFrameT: + index = self.obj._get_axis(self.axis) + if res.size == 0: + result = res.astype(index.dtype) + else: + if isinstance(index, MultiIndex): + index = index.to_flat_index() + values = res._values + assert isinstance(values, np.ndarray) + na_value = na_value_for_dtype(index.dtype, compat=False) + if isinstance(res, Series): + # mypy: expression has type "Series", variable has type "NDFrameT" + result = res._constructor( # type: ignore[assignment] + index.array.take(values, allow_fill=True, fill_value=na_value), + index=res.index, + name=res.name, + ) + else: + data = {} + for k, column_values in enumerate(values.T): + data[k] = index.array.take( + column_values, allow_fill=True, fill_value=na_value + ) + result = self.obj._constructor(data, index=res.index) + result.columns = res.columns + return result + + +@doc(GroupBy) +def get_groupby( + obj: NDFrame, + by: _KeysArgType | None = None, + axis: AxisInt = 0, + grouper: ops.BaseGrouper | None = None, + group_keys: bool = True, +) -> GroupBy: + klass: type[GroupBy] + if isinstance(obj, Series): + from pandas.core.groupby.generic import SeriesGroupBy + + klass = SeriesGroupBy + elif isinstance(obj, DataFrame): + from pandas.core.groupby.generic import DataFrameGroupBy + + klass = DataFrameGroupBy + else: # pragma: no cover + raise TypeError(f"invalid type: {obj}") + + return klass( + obj=obj, + keys=by, + axis=axis, + grouper=grouper, + group_keys=group_keys, + ) + + +def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiIndex: + """ + Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex. + + The quantile level in the MultiIndex is a repeated copy of 'qs'. + + Parameters + ---------- + idx : Index + qs : np.ndarray[float64] + + Returns + ------- + MultiIndex + """ + nqs = len(qs) + lev_codes, lev = Index(qs).factorize() + lev_codes = coerce_indexer_dtype(lev_codes, lev) + + if idx._is_multi: + idx = cast(MultiIndex, idx) + levels = list(idx.levels) + [lev] + codes = [np.repeat(x, nqs) for x in idx.codes] + [np.tile(lev_codes, len(idx))] + mi = MultiIndex(levels=levels, codes=codes, names=idx.names + [None]) + else: + nidx = len(idx) + idx_codes = coerce_indexer_dtype(np.arange(nidx), idx) + levels = [idx, lev] + codes = [np.repeat(idx_codes, nqs), np.tile(lev_codes, nidx)] + mi = MultiIndex(levels=levels, codes=codes, names=[idx.name, None]) + + return mi + + +# GH#7155 +_apply_groupings_depr = ( + "{}.{} operated on the grouping columns. This behavior is deprecated, " + "and in a future version of pandas the grouping columns will be excluded " + "from the operation. 
Either pass `include_groups=False` to exclude the " + "groupings or explicitly select the grouping columns after groupby to silence " + "this warning." +) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/grouper.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/grouper.py new file mode 100644 index 0000000000000000000000000000000000000000..e2224caad9e846f5e386019e6da5b47d16ab3694 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/grouper.py @@ -0,0 +1,1102 @@ +""" +Provide user facing operators for doing the split part of the +split-apply-combine paradigm. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + final, +) +import warnings + +import numpy as np + +from pandas._config import ( + using_copy_on_write, + warn_copy_on_write, +) + +from pandas._libs import lib +from pandas._libs.tslibs import OutOfBoundsDatetime +from pandas.errors import InvalidIndexError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas.core import algorithms +from pandas.core.arrays import ( + Categorical, + ExtensionArray, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.groupby import ops +from pandas.core.groupby.categorical import recode_for_groupby +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, +) +from pandas.core.series import Series + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + NDFrameT, + npt, + ) + + from pandas.core.generic import NDFrame + + +class Grouper: + """ + A Grouper allows the user to specify a groupby instruction for an object. + + This specification will select a column via the key parameter, or if the + level and/or axis parameters are given, a level of the index of the target + object. + + If `axis` and/or `level` are passed as keywords to both `Grouper` and + `groupby`, the values passed to `Grouper` take precedence. + + Parameters + ---------- + key : str, defaults to None + Groupby key, which selects the grouping column of the target. + level : name/number, defaults to None + The level for the target index. + freq : str / frequency object, defaults to None + This will groupby the specified frequency if the target selection + (via key or level) is a datetime-like object. For full specification + of available frequencies, please see `here + `_. + axis : str, int, defaults to 0 + Number/name of the axis. + sort : bool, default to False + Whether to sort the resulting labels. + closed : {'left' or 'right'} + Closed end of interval. Only when `freq` parameter is passed. + label : {'left' or 'right'} + Interval boundary to use for labeling. + Only when `freq` parameter is passed. + convention : {'start', 'end', 'e', 's'} + If grouper is PeriodIndex and `freq` parameter is passed. + + origin : Timestamp or str, default 'start_day' + The timestamp on which to adjust the grouping. The timezone of origin must + match the timezone of the index. 
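Besides the single-key examples further below, a Grouper can be mixed with plain column keys in one groupby call; a minimal sketch (column names and data are illustrative):

import pandas as pd

# Weekly time bins on a datetime column combined with a second grouping key.
df = pd.DataFrame(
    {
        "date": pd.to_datetime(["2000-01-02", "2000-01-02", "2000-01-09"]),
        "store": ["A", "B", "A"],
        "sales": [10, 20, 30],
    }
)
res = df.groupby([pd.Grouper(key="date", freq="1W"), "store"])["sales"].sum()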
+ If string, must be one of the following: + + - 'epoch': `origin` is 1970-01-01 + - 'start': `origin` is the first value of the timeseries + - 'start_day': `origin` is the first day at midnight of the timeseries + + - 'end': `origin` is the last value of the timeseries + - 'end_day': `origin` is the ceiling midnight of the last day + + .. versionadded:: 1.3.0 + + offset : Timedelta or str, default is None + An offset timedelta added to the origin. + + dropna : bool, default True + If True, and if group keys contain NA values, NA values together with + row/column will be dropped. If False, NA values will also be treated as + the key in groups. + + Returns + ------- + Grouper or pandas.api.typing.TimeGrouper + A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper + is returned. + + Examples + -------- + ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')`` + + >>> df = pd.DataFrame( + ... { + ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"], + ... "Speed": [100, 5, 200, 300, 15], + ... } + ... ) + >>> df + Animal Speed + 0 Falcon 100 + 1 Parrot 5 + 2 Falcon 200 + 3 Falcon 300 + 4 Parrot 15 + >>> df.groupby(pd.Grouper(key="Animal")).mean() + Speed + Animal + Falcon 200.0 + Parrot 10.0 + + Specify a resample operation on the column 'Publish date' + + >>> df = pd.DataFrame( + ... { + ... "Publish date": [ + ... pd.Timestamp("2000-01-02"), + ... pd.Timestamp("2000-01-02"), + ... pd.Timestamp("2000-01-09"), + ... pd.Timestamp("2000-01-16") + ... ], + ... "ID": [0, 1, 2, 3], + ... "Price": [10, 20, 30, 40] + ... } + ... ) + >>> df + Publish date ID Price + 0 2000-01-02 0 10 + 1 2000-01-02 1 20 + 2 2000-01-09 2 30 + 3 2000-01-16 3 40 + >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean() + ID Price + Publish date + 2000-01-02 0.5 15.0 + 2000-01-09 2.0 30.0 + 2000-01-16 3.0 40.0 + + If you want to adjust the start of the bins based on a fixed timestamp: + + >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' + >>> rng = pd.date_range(start, end, freq='7min') + >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) + >>> ts + 2000-10-01 23:30:00 0 + 2000-10-01 23:37:00 3 + 2000-10-01 23:44:00 6 + 2000-10-01 23:51:00 9 + 2000-10-01 23:58:00 12 + 2000-10-02 00:05:00 15 + 2000-10-02 00:12:00 18 + 2000-10-02 00:19:00 21 + 2000-10-02 00:26:00 24 + Freq: 7min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min')).sum() + 2000-10-01 23:14:00 0 + 2000-10-01 23:31:00 9 + 2000-10-01 23:48:00 21 + 2000-10-02 00:05:00 54 + 2000-10-02 00:22:00 24 + Freq: 17min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum() + 2000-10-01 23:18:00 0 + 2000-10-01 23:35:00 18 + 2000-10-01 23:52:00 27 + 2000-10-02 00:09:00 39 + 2000-10-02 00:26:00 24 + Freq: 17min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum() + 2000-10-01 23:24:00 3 + 2000-10-01 23:41:00 15 + 2000-10-01 23:58:00 45 + 2000-10-02 00:15:00 45 + Freq: 17min, dtype: int64 + + If you want to adjust the start of the bins with an `offset` Timedelta, the two + following lines are equivalent: + + >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17min, dtype: int64 + + To replace the use of the 
deprecated `base` argument, you can now use `offset`, + in this example it is equivalent to have `base=2`: + + >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum() + 2000-10-01 23:16:00 0 + 2000-10-01 23:33:00 9 + 2000-10-01 23:50:00 36 + 2000-10-02 00:07:00 39 + 2000-10-02 00:24:00 24 + Freq: 17min, dtype: int64 + """ + + sort: bool + dropna: bool + _gpr_index: Index | None + _grouper: Index | None + + _attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna") + + def __new__(cls, *args, **kwargs): + if kwargs.get("freq") is not None: + from pandas.core.resample import TimeGrouper + + cls = TimeGrouper + return super().__new__(cls) + + def __init__( + self, + key=None, + level=None, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + sort: bool = False, + dropna: bool = True, + ) -> None: + if type(self) is Grouper: + # i.e. not TimeGrouper + if axis is not lib.no_default: + warnings.warn( + "Grouper axis keyword is deprecated and will be removed in a " + "future version. To group on axis=1, use obj.T.groupby(...) " + "instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + if axis is lib.no_default: + axis = 0 + + self.key = key + self.level = level + self.freq = freq + self.axis = axis + self.sort = sort + self.dropna = dropna + + self._grouper_deprecated = None + self._indexer_deprecated: npt.NDArray[np.intp] | None = None + self._obj_deprecated = None + self._gpr_index = None + self.binner = None + self._grouper = None + self._indexer: npt.NDArray[np.intp] | None = None + + def _get_grouper( + self, obj: NDFrameT, validate: bool = True + ) -> tuple[ops.BaseGrouper, NDFrameT]: + """ + Parameters + ---------- + obj : Series or DataFrame + validate : bool, default True + if True, validate the grouper + + Returns + ------- + a tuple of grouper, obj (possibly sorted) + """ + obj, _, _ = self._set_grouper(obj) + grouper, _, obj = get_grouper( + obj, + [self.key], + axis=self.axis, + level=self.level, + sort=self.sort, + validate=validate, + dropna=self.dropna, + ) + # Without setting this, subsequent lookups to .groups raise + # error: Incompatible types in assignment (expression has type "BaseGrouper", + # variable has type "None") + self._grouper_deprecated = grouper # type: ignore[assignment] + + return grouper, obj + + def _set_grouper( + self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None + ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]: + """ + given an object and the specifications, setup the internal grouper + for this particular specification + + Parameters + ---------- + obj : Series or DataFrame + sort : bool, default False + whether the resulting grouper should be sorted + gpr_index : Index or None, default None + + Returns + ------- + NDFrame + Index + np.ndarray[np.intp] | None + """ + assert obj is not None + + if self.key is not None and self.level is not None: + raise ValueError("The Grouper cannot specify both a key and a level!") + + # Keep self._grouper value before overriding + if self._grouper is None: + # TODO: What are we assuming about subsequent calls? + self._grouper = gpr_index + self._indexer = self._indexer_deprecated + + # the key must be a valid info item + if self.key is not None: + key = self.key + # The 'on' is already defined + if getattr(gpr_index, "name", None) == key and isinstance(obj, Series): + # Sometimes self._grouper will have been resorted while + # obj has not. 
In this case there is a mismatch when we + # call self._grouper.take(obj.index) so we need to undo the sorting + # before we call _grouper.take. + assert self._grouper is not None + if self._indexer is not None: + reverse_indexer = self._indexer.argsort() + unsorted_ax = self._grouper.take(reverse_indexer) + ax = unsorted_ax.take(obj.index) + else: + ax = self._grouper.take(obj.index) + else: + if key not in obj._info_axis: + raise KeyError(f"The grouper name {key} is not found") + ax = Index(obj[key], name=key) + + else: + ax = obj._get_axis(self.axis) + if self.level is not None: + level = self.level + + # if a level is given it must be a mi level or + # equivalent to the axis name + if isinstance(ax, MultiIndex): + level = ax._get_level_number(level) + ax = Index(ax._get_level_values(level), name=ax.names[level]) + + else: + if level not in (0, ax.name): + raise ValueError(f"The level {level} is not valid") + + # possibly sort + indexer: npt.NDArray[np.intp] | None = None + if (self.sort or sort) and not ax.is_monotonic_increasing: + # use stable sort to support first, last, nth + # TODO: why does putting na_position="first" fix datetimelike cases? + indexer = self._indexer_deprecated = ax.array.argsort( + kind="mergesort", na_position="first" + ) + ax = ax.take(indexer) + obj = obj.take(indexer, axis=self.axis) + + # error: Incompatible types in assignment (expression has type + # "NDFrameT", variable has type "None") + self._obj_deprecated = obj # type: ignore[assignment] + self._gpr_index = ax + return obj, ax, indexer + + @final + @property + def ax(self) -> Index: + warnings.warn( + f"{type(self).__name__}.ax is deprecated and will be removed in a " + "future version. Use Resampler.ax instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + index = self._gpr_index + if index is None: + raise ValueError("_set_grouper must be called before ax is accessed") + return index + + @final + @property + def indexer(self): + warnings.warn( + f"{type(self).__name__}.indexer is deprecated and will be removed " + "in a future version. Use Resampler.indexer instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._indexer_deprecated + + @final + @property + def obj(self): + # TODO(3.0): enforcing these deprecations on Grouper should close + # GH#25564, GH#41930 + warnings.warn( + f"{type(self).__name__}.obj is deprecated and will be removed " + "in a future version. Use GroupBy.indexer instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._obj_deprecated + + @final + @property + def grouper(self): + warnings.warn( + f"{type(self).__name__}.grouper is deprecated and will be removed " + "in a future version. Use GroupBy.grouper instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._grouper_deprecated + + @final + @property + def groups(self): + warnings.warn( + f"{type(self).__name__}.groups is deprecated and will be removed " + "in a future version. 
Use GroupBy.groups instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # error: "None" has no attribute "groups" + return self._grouper_deprecated.groups # type: ignore[attr-defined] + + @final + def __repr__(self) -> str: + attrs_list = ( + f"{attr_name}={repr(getattr(self, attr_name))}" + for attr_name in self._attributes + if getattr(self, attr_name) is not None + ) + attrs = ", ".join(attrs_list) + cls_name = type(self).__name__ + return f"{cls_name}({attrs})" + + +@final +class Grouping: + """ + Holds the grouping information for a single key + + Parameters + ---------- + index : Index + grouper : + obj : DataFrame or Series + name : Label + level : + observed : bool, default False + If we are a Categorical, use the observed values + in_axis : if the Grouping is a column in self.obj and hence among + Groupby.exclusions list + dropna : bool, default True + Whether to drop NA groups. + uniques : Array-like, optional + When specified, will be used for unique values. Enables including empty groups + in the result for a BinGrouper. Must not contain duplicates. + + Attributes + ------- + indices : dict + Mapping of {group -> index_list} + codes : ndarray + Group codes + group_index : Index or None + unique groups + groups : dict + Mapping of {group -> label_list} + """ + + _codes: npt.NDArray[np.signedinteger] | None = None + _all_grouper: Categorical | None + _orig_cats: Index | None + _index: Index + + def __init__( + self, + index: Index, + grouper=None, + obj: NDFrame | None = None, + level=None, + sort: bool = True, + observed: bool = False, + in_axis: bool = False, + dropna: bool = True, + uniques: ArrayLike | None = None, + ) -> None: + self.level = level + self._orig_grouper = grouper + grouping_vector = _convert_grouper(index, grouper) + self._all_grouper = None + self._orig_cats = None + self._index = index + self._sort = sort + self.obj = obj + self._observed = observed + self.in_axis = in_axis + self._dropna = dropna + self._uniques = uniques + + # we have a single grouper which may be a myriad of things, + # some of which are dependent on the passing in level + + ilevel = self._ilevel + if ilevel is not None: + # In extant tests, the new self.grouping_vector matches + # `index.get_level_values(ilevel)` whenever + # mapper is None and isinstance(index, MultiIndex) + if isinstance(index, MultiIndex): + index_level = index.get_level_values(ilevel) + else: + index_level = index + + if grouping_vector is None: + grouping_vector = index_level + else: + mapper = grouping_vector + grouping_vector = index_level.map(mapper) + + # a passed Grouper like, directly get the grouper in the same way + # as single grouper groupby, use the group_info to get codes + elif isinstance(grouping_vector, Grouper): + # get the new grouper; we already have disambiguated + # what key/level refer to exactly, don't need to + # check again as we have by this point converted these + # to an actual value (rather than a pd.Grouper) + assert self.obj is not None # for mypy + newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False) + self.obj = newobj + + if isinstance(newgrouper, ops.BinGrouper): + # TODO: can we unwrap this and get a tighter typing + # for self.grouping_vector? + grouping_vector = newgrouper + else: + # ops.BaseGrouper + # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1. + # If that were to occur, would we be throwing out information? 
+ # error: Cannot determine type of "grouping_vector" [has-type] + ng = newgrouper.groupings[0].grouping_vector # type: ignore[has-type] + # use Index instead of ndarray so we can recover the name + grouping_vector = Index(ng, name=newgrouper.result_index.name) + + elif not isinstance( + grouping_vector, (Series, Index, ExtensionArray, np.ndarray) + ): + # no level passed + if getattr(grouping_vector, "ndim", 1) != 1: + t = str(type(grouping_vector)) + raise ValueError(f"Grouper for '{t}' not 1-dimensional") + + grouping_vector = index.map(grouping_vector) + + if not ( + hasattr(grouping_vector, "__len__") + and len(grouping_vector) == len(index) + ): + grper = pprint_thing(grouping_vector) + errmsg = ( + "Grouper result violates len(labels) == " + f"len(data)\nresult: {grper}" + ) + raise AssertionError(errmsg) + + if isinstance(grouping_vector, np.ndarray): + if grouping_vector.dtype.kind in "mM": + # if we have a date/time-like grouper, make sure that we have + # Timestamps like + # TODO 2022-10-08 we only have one test that gets here and + # values are already in nanoseconds in that case. + grouping_vector = Series(grouping_vector).to_numpy() + elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype): + # a passed Categorical + self._orig_cats = grouping_vector.categories + grouping_vector, self._all_grouper = recode_for_groupby( + grouping_vector, sort, observed + ) + + self.grouping_vector = grouping_vector + + def __repr__(self) -> str: + return f"Grouping({self.name})" + + def __iter__(self) -> Iterator: + return iter(self.indices) + + @cache_readonly + def _passed_categorical(self) -> bool: + dtype = getattr(self.grouping_vector, "dtype", None) + return isinstance(dtype, CategoricalDtype) + + @cache_readonly + def name(self) -> Hashable: + ilevel = self._ilevel + if ilevel is not None: + return self._index.names[ilevel] + + if isinstance(self._orig_grouper, (Index, Series)): + return self._orig_grouper.name + + elif isinstance(self.grouping_vector, ops.BaseGrouper): + return self.grouping_vector.result_index.name + + elif isinstance(self.grouping_vector, Index): + return self.grouping_vector.name + + # otherwise we have ndarray or ExtensionArray -> no name + return None + + @cache_readonly + def _ilevel(self) -> int | None: + """ + If necessary, converted index level name to index level position. + """ + level = self.level + if level is None: + return None + if not isinstance(level, int): + index = self._index + if level not in index.names: + raise AssertionError(f"Level {level} not in index") + return index.names.index(level) + return level + + @property + def ngroups(self) -> int: + return len(self._group_index) + + @cache_readonly + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + # we have a list of groupers + if isinstance(self.grouping_vector, ops.BaseGrouper): + return self.grouping_vector.indices + + values = Categorical(self.grouping_vector) + return values._reverse_indexer() + + @property + def codes(self) -> npt.NDArray[np.signedinteger]: + return self._codes_and_uniques[0] + + @cache_readonly + def _group_arraylike(self) -> ArrayLike: + """ + Analogous to result_index, but holding an ArrayLike to ensure + we can retain ExtensionDtypes. 
+ """ + if self._all_grouper is not None: + # retain dtype for categories, including unobserved ones + return self._result_index._values + + elif self._passed_categorical: + return self._group_index._values + + return self._codes_and_uniques[1] + + @property + def group_arraylike(self) -> ArrayLike: + """ + Analogous to result_index, but holding an ArrayLike to ensure + we can retain ExtensionDtypes. + """ + warnings.warn( + "group_arraylike is deprecated and will be removed in a future " + "version of pandas", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._group_arraylike + + @cache_readonly + def _result_index(self) -> Index: + # result_index retains dtype for categories, including unobserved ones, + # which group_index does not + if self._all_grouper is not None: + group_idx = self._group_index + assert isinstance(group_idx, CategoricalIndex) + cats = self._orig_cats + # set_categories is dynamically added + return group_idx.set_categories(cats) # type: ignore[attr-defined] + return self._group_index + + @property + def result_index(self) -> Index: + warnings.warn( + "result_index is deprecated and will be removed in a future " + "version of pandas", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._result_index + + @cache_readonly + def _group_index(self) -> Index: + codes, uniques = self._codes_and_uniques + if not self._dropna and self._passed_categorical: + assert isinstance(uniques, Categorical) + if self._sort and (codes == len(uniques)).any(): + # Add NA value on the end when sorting + uniques = Categorical.from_codes( + np.append(uniques.codes, [-1]), uniques.categories, validate=False + ) + elif len(codes) > 0: + # Need to determine proper placement of NA value when not sorting + cat = self.grouping_vector + na_idx = (cat.codes < 0).argmax() + if cat.codes[na_idx] < 0: + # count number of unique codes that comes before the nan value + na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx]) + new_codes = np.insert(uniques.codes, na_unique_idx, -1) + uniques = Categorical.from_codes( + new_codes, uniques.categories, validate=False + ) + return Index._with_infer(uniques, name=self.name) + + @property + def group_index(self) -> Index: + warnings.warn( + "group_index is deprecated and will be removed in a future " + "version of pandas", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._group_index + + @cache_readonly + def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]: + uniques: ArrayLike + if self._passed_categorical: + # we make a CategoricalIndex out of the cat grouper + # preserving the categories / ordered attributes; + # doesn't (yet - GH#46909) handle dropna=False + cat = self.grouping_vector + categories = cat.categories + + if self._observed: + ucodes = algorithms.unique1d(cat.codes) + ucodes = ucodes[ucodes != -1] + if self._sort: + ucodes = np.sort(ucodes) + else: + ucodes = np.arange(len(categories)) + + uniques = Categorical.from_codes( + codes=ucodes, categories=categories, ordered=cat.ordered, validate=False + ) + + codes = cat.codes + if not self._dropna: + na_mask = codes < 0 + if np.any(na_mask): + if self._sort: + # Replace NA codes with `largest code + 1` + na_code = len(categories) + codes = np.where(na_mask, na_code, codes) + else: + # Insert NA code into the codes based on first appearance + # A negative code must exist, no need to check codes[na_idx] < 0 + na_idx = na_mask.argmax() + # count number of unique codes that comes before the nan 
value + na_code = algorithms.nunique_ints(codes[:na_idx]) + codes = np.where(codes >= na_code, codes + 1, codes) + codes = np.where(na_mask, na_code, codes) + + if not self._observed: + uniques = uniques.reorder_categories(self._orig_cats) + + return codes, uniques + + elif isinstance(self.grouping_vector, ops.BaseGrouper): + # we have a list of groupers + codes = self.grouping_vector.codes_info + uniques = self.grouping_vector.result_index._values + elif self._uniques is not None: + # GH#50486 Code grouping_vector using _uniques; allows + # including uniques that are not present in grouping_vector. + cat = Categorical(self.grouping_vector, categories=self._uniques) + codes = cat.codes + uniques = self._uniques + else: + # GH35667, replace dropna=False with use_na_sentinel=False + # error: Incompatible types in assignment (expression has type "Union[ + # ndarray[Any, Any], Index]", variable has type "Categorical") + codes, uniques = algorithms.factorize( # type: ignore[assignment] + self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna + ) + return codes, uniques + + @cache_readonly + def groups(self) -> dict[Hashable, np.ndarray]: + cats = Categorical.from_codes(self.codes, self._group_index, validate=False) + return self._index.groupby(cats) + + +def get_grouper( + obj: NDFrameT, + key=None, + axis: Axis = 0, + level=None, + sort: bool = True, + observed: bool = False, + validate: bool = True, + dropna: bool = True, +) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]: + """ + Create and return a BaseGrouper, which is an internal + mapping of how to create the grouper indexers. + This may be composed of multiple Grouping objects, indicating + multiple groupers + + Groupers are ultimately index mappings. They can originate as: + index mappings, keys to columns, functions, or Groupers + + Groupers enable local references to axis,level,sort, while + the passed in axis, level, and sort are 'global'. + + This routine tries to figure out what the passing in references + are and then creates a Grouping for each one, combined into + a BaseGrouper. + + If observed & we have a categorical grouper, only show the observed + values. + + If validate, then check for key/level overlaps. + + """ + group_axis = obj._get_axis(axis) + + # validate that the passed single level is compatible with the passed + # axis of the object + if level is not None: + # TODO: These if-block and else-block are almost same. + # MultiIndex instance check is removable, but it seems that there are + # some processes only for non-MultiIndex in else-block, + # eg. `obj.index.name != level`. We have to consider carefully whether + # these are applicable for MultiIndex. Even if these are applicable, + # we need to check if it makes no side effect to subsequent processes + # on the outside of this condition. 
+ # (GH 17621) + if isinstance(group_axis, MultiIndex): + if is_list_like(level) and len(level) == 1: + level = level[0] + + if key is None and is_scalar(level): + # Get the level values from group_axis + key = group_axis.get_level_values(level) + level = None + + else: + # allow level to be a length-one list-like object + # (e.g., level=[0]) + # GH 13901 + if is_list_like(level): + nlevels = len(level) + if nlevels == 1: + level = level[0] + elif nlevels == 0: + raise ValueError("No group keys passed!") + else: + raise ValueError("multiple levels only valid with MultiIndex") + + if isinstance(level, str): + if obj._get_axis(axis).name != level: + raise ValueError( + f"level name {level} is not the name " + f"of the {obj._get_axis_name(axis)}" + ) + elif level > 0 or level < -1: + raise ValueError("level > 0 or level < -1 only valid with MultiIndex") + + # NOTE: `group_axis` and `group_axis.get_level_values(level)` + # are same in this section. + level = None + key = group_axis + + # a passed-in Grouper, directly convert + if isinstance(key, Grouper): + grouper, obj = key._get_grouper(obj, validate=False) + if key.key is None: + return grouper, frozenset(), obj + else: + return grouper, frozenset({key.key}), obj + + # already have a BaseGrouper, just return it + elif isinstance(key, ops.BaseGrouper): + return key, frozenset(), obj + + if not isinstance(key, list): + keys = [key] + match_axis_length = False + else: + keys = key + match_axis_length = len(keys) == len(group_axis) + + # what are we after, exactly? + any_callable = any(callable(g) or isinstance(g, dict) for g in keys) + any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys) + any_arraylike = any( + isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys + ) + + # is this an index replacement? + if ( + not any_callable + and not any_arraylike + and not any_groupers + and match_axis_length + and level is None + ): + if isinstance(obj, DataFrame): + all_in_columns_index = all( + g in obj.columns or g in obj.index.names for g in keys + ) + else: + assert isinstance(obj, Series) + all_in_columns_index = all(g in obj.index.names for g in keys) + + if not all_in_columns_index: + keys = [com.asarray_tuplesafe(keys)] + + if isinstance(level, (tuple, list)): + if key is None: + keys = [None] * len(level) + levels = level + else: + levels = [level] * len(keys) + + groupings: list[Grouping] = [] + exclusions: set[Hashable] = set() + + # if the actual grouper should be obj[key] + def is_in_axis(key) -> bool: + if not _is_label_like(key): + if obj.ndim == 1: + return False + + # items -> .columns for DataFrame, .index for Series + items = obj.axes[-1] + try: + items.get_loc(key) + except (KeyError, TypeError, InvalidIndexError): + # TypeError shows up here if we pass e.g. 
an Index + return False + + return True + + # if the grouper is obj[name] + def is_in_obj(gpr) -> bool: + if not hasattr(gpr, "name"): + return False + if using_copy_on_write() or warn_copy_on_write(): + # For the CoW case, we check the references to determine if the + # series is part of the object + try: + obj_gpr_column = obj[gpr.name] + except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime): + return False + if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series): + return gpr._mgr.references_same_values( # type: ignore[union-attr] + obj_gpr_column._mgr, 0 # type: ignore[arg-type] + ) + return False + try: + return gpr is obj[gpr.name] + except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime): + # IndexError reached in e.g. test_skip_group_keys when we pass + # lambda here + # InvalidIndexError raised on key-types inappropriate for index, + # e.g. DatetimeIndex.get_loc(tuple()) + # OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex + # and gpr.name is month str + return False + + for gpr, level in zip(keys, levels): + if is_in_obj(gpr): # df.groupby(df['name']) + in_axis = True + exclusions.add(gpr.name) + + elif is_in_axis(gpr): # df.groupby('name') + if obj.ndim != 1 and gpr in obj: + if validate: + obj._check_label_or_level_ambiguity(gpr, axis=axis) + in_axis, name, gpr = True, gpr, obj[gpr] + if gpr.ndim != 1: + # non-unique columns; raise here to get the name in the + # exception message + raise ValueError(f"Grouper for '{name}' not 1-dimensional") + exclusions.add(name) + elif obj._is_level_reference(gpr, axis=axis): + in_axis, level, gpr = False, gpr, None + else: + raise KeyError(gpr) + elif isinstance(gpr, Grouper) and gpr.key is not None: + # Add key to exclusions + exclusions.add(gpr.key) + in_axis = True + else: + in_axis = False + + # create the Grouping + # allow us to passing the actual Grouping as the gpr + ping = ( + Grouping( + group_axis, + gpr, + obj=obj, + level=level, + sort=sort, + observed=observed, + in_axis=in_axis, + dropna=dropna, + ) + if not isinstance(gpr, Grouping) + else gpr + ) + + groupings.append(ping) + + if len(groupings) == 0 and len(obj): + raise ValueError("No group keys passed!") + if len(groupings) == 0: + groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) + + # create the internals grouper + grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna) + return grouper, frozenset(exclusions), obj + + +def _is_label_like(val) -> bool: + return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val)) + + +def _convert_grouper(axis: Index, grouper): + if isinstance(grouper, dict): + return grouper.get + elif isinstance(grouper, Series): + if grouper.index.equals(axis): + return grouper._values + else: + return grouper.reindex(axis)._values + elif isinstance(grouper, MultiIndex): + return grouper._values + elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)): + if len(grouper) != len(axis): + raise ValueError("Grouper and axis must be same length") + + if isinstance(grouper, (list, tuple)): + grouper = com.asarray_tuplesafe(grouper) + return grouper + else: + return grouper diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/indexing.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..a3c5ab8edc94e4f91175891282252d0e8cdfd3ec --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/indexing.py @@ -0,0 +1,304 @@ +from __future__ import annotations + +from collections.abc import Iterable +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) + +import numpy as np + +from pandas.util._decorators import ( + cache_readonly, + doc, +) + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) + +if TYPE_CHECKING: + from pandas._typing import PositionalIndexer + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.groupby import groupby + + +class GroupByIndexingMixin: + """ + Mixin for adding ._positional_selector to GroupBy. + """ + + @cache_readonly + def _positional_selector(self) -> GroupByPositionalSelector: + """ + Return positional selection for each group. + + ``groupby._positional_selector[i:j]`` is similar to + ``groupby.apply(lambda x: x.iloc[i:j])`` + but much faster and preserves the original index and order. + + ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head` + and :meth:`~GroupBy.tail`. For example: + + - ``head(5)`` + - ``_positional_selector[5:-5]`` + - ``tail(5)`` + + together return all the rows. + + Allowed inputs for the index are: + + - An integer valued iterable, e.g. ``range(2, 4)``. + - A comma separated list of integers and slices, e.g. ``5``, ``2, 4``, ``2:4``. + + The output format is the same as :meth:`~GroupBy.head` and + :meth:`~GroupBy.tail`, namely + a subset of the ``DataFrame`` or ``Series`` with the index and order preserved. + + Returns + ------- + Series + The filtered subset of the original Series. + DataFrame + The filtered subset of the original DataFrame. + + See Also + -------- + DataFrame.iloc : Purely integer-location based indexing for selection by + position. + GroupBy.head : Return first n rows of each group. + GroupBy.tail : Return last n rows of each group. + GroupBy.nth : Take the nth row from each group if n is an int, or a + subset of rows, if n is a list of ints. + + Notes + ----- + - The slice step cannot be negative. + - If the index specification results in overlaps, the item is not duplicated. + - If the index specification changes the order of items, then + they are returned in their original order. + By contrast, ``DataFrame.iloc`` can change the row order. + - ``groupby()`` parameters such as as_index and dropna are ignored. + + The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth` + with ``as_index=False`` are: + + - Input to ``_positional_selector`` can include + one or more slices whereas ``nth`` + just handles an integer or a list of integers. + - ``_positional_selector`` can accept a slice relative to the + last row of each group. + - ``_positional_selector`` does not have an equivalent to the + ``nth()`` ``dropna`` parameter. + + Examples + -------- + >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], + ... 
columns=["A", "B"]) + >>> df.groupby("A")._positional_selector[1:2] + A B + 1 a 2 + 4 b 5 + + >>> df.groupby("A")._positional_selector[1, -1] + A B + 1 a 2 + 2 a 3 + 4 b 5 + """ + if TYPE_CHECKING: + # pylint: disable-next=used-before-assignment + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return GroupByPositionalSelector(groupby_self) + + def _make_mask_from_positional_indexer( + self, + arg: PositionalIndexer | tuple, + ) -> np.ndarray: + if is_list_like(arg): + if all(is_integer(i) for i in cast(Iterable, arg)): + mask = self._make_mask_from_list(cast(Iterable[int], arg)) + else: + mask = self._make_mask_from_tuple(cast(tuple, arg)) + + elif isinstance(arg, slice): + mask = self._make_mask_from_slice(arg) + elif is_integer(arg): + mask = self._make_mask_from_int(cast(int, arg)) + else: + raise TypeError( + f"Invalid index {type(arg)}. " + "Must be integer, list-like, slice or a tuple of " + "integers and slices" + ) + + if isinstance(mask, bool): + if mask: + mask = self._ascending_count >= 0 + else: + mask = self._ascending_count < 0 + + return cast(np.ndarray, mask) + + def _make_mask_from_int(self, arg: int) -> np.ndarray: + if arg >= 0: + return self._ascending_count == arg + else: + return self._descending_count == (-arg - 1) + + def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray: + positive = [arg for arg in args if arg >= 0] + negative = [-arg - 1 for arg in args if arg < 0] + + mask: bool | np.ndarray = False + + if positive: + mask |= np.isin(self._ascending_count, positive) + + if negative: + mask |= np.isin(self._descending_count, negative) + + return mask + + def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray: + mask: bool | np.ndarray = False + + for arg in args: + if is_integer(arg): + mask |= self._make_mask_from_int(cast(int, arg)) + elif isinstance(arg, slice): + mask |= self._make_mask_from_slice(arg) + else: + raise ValueError( + f"Invalid argument {type(arg)}. Should be int or slice." + ) + + return mask + + def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray: + start = arg.start + stop = arg.stop + step = arg.step + + if step is not None and step < 0: + raise ValueError(f"Invalid step {step}. 
Must be non-negative") + + mask: bool | np.ndarray = True + + if step is None: + step = 1 + + if start is None: + if step > 1: + mask &= self._ascending_count % step == 0 + + elif start >= 0: + mask &= self._ascending_count >= start + + if step > 1: + mask &= (self._ascending_count - start) % step == 0 + + else: + mask &= self._descending_count < -start + + offset_array = self._descending_count + start + 1 + limit_array = ( + self._ascending_count + self._descending_count + (start + 1) + ) < 0 + offset_array = np.where(limit_array, self._ascending_count, offset_array) + + mask &= offset_array % step == 0 + + if stop is not None: + if stop >= 0: + mask &= self._ascending_count < stop + else: + mask &= self._descending_count >= -stop + + return mask + + @cache_readonly + def _ascending_count(self) -> np.ndarray: + if TYPE_CHECKING: + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return groupby_self._cumcount_array() + + @cache_readonly + def _descending_count(self) -> np.ndarray: + if TYPE_CHECKING: + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return groupby_self._cumcount_array(ascending=False) + + +@doc(GroupByIndexingMixin._positional_selector) +class GroupByPositionalSelector: + def __init__(self, groupby_object: groupby.GroupBy) -> None: + self.groupby_object = groupby_object + + def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series: + """ + Select by positional index per group. + + Implements GroupBy._positional_selector + + Parameters + ---------- + arg : PositionalIndexer | tuple + Allowed values are: + - int + - int valued iterable such as list or range + - slice with step either None or positive + - tuple of integers and slices + + Returns + ------- + Series + The filtered subset of the original groupby Series. + DataFrame + The filtered subset of the original groupby DataFrame. + + See Also + -------- + DataFrame.iloc : Integer-location based indexing for selection by position. + GroupBy.head : Return first n rows of each group. + GroupBy.tail : Return last n rows of each group. + GroupBy._positional_selector : Return positional selection for each group. + GroupBy.nth : Take the nth row from each group if n is an int, or a + subset of rows, if n is a list of ints. 
+ """ + mask = self.groupby_object._make_mask_from_positional_indexer(arg) + return self.groupby_object._mask_selected_obj(mask) + + +class GroupByNthSelector: + """ + Dynamically substituted for GroupBy.nth to enable both call and index + """ + + def __init__(self, groupby_object: groupby.GroupBy) -> None: + self.groupby_object = groupby_object + + def __call__( + self, + n: PositionalIndexer | tuple, + dropna: Literal["any", "all", None] = None, + ) -> DataFrame | Series: + return self.groupby_object._nth(n, dropna) + + def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series: + return self.groupby_object._nth(n) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/numba_.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/numba_.py new file mode 100644 index 0000000000000000000000000000000000000000..3b7a58e87603e578216c4c80e8c88e06828d5dfa --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/numba_.py @@ -0,0 +1,181 @@ +"""Common utilities for Numba operations with groupby ops""" +from __future__ import annotations + +import functools +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.util.numba_ import ( + NumbaUtilError, + jit_user_function, +) + +if TYPE_CHECKING: + from pandas._typing import Scalar + + +def validate_udf(func: Callable) -> None: + """ + Validate user defined function for ops when using Numba with groupby ops. + + The first signature arguments should include: + + def f(values, index, ...): + ... + + Parameters + ---------- + func : function, default False + user defined function + + Returns + ------- + None + + Raises + ------ + NumbaUtilError + """ + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + udf_signature = list(inspect.signature(func).parameters.keys()) + expected_args = ["values", "index"] + min_number_args = len(expected_args) + if ( + len(udf_signature) < min_number_args + or udf_signature[:min_number_args] != expected_args + ): + raise NumbaUtilError( + f"The first {min_number_args} arguments to {func.__name__} must be " + f"{expected_args}" + ) + + +@functools.cache +def generate_numba_agg_func( + func: Callable[..., Scalar], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted agg function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby agg function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. 
+ + Parameters + ---------- + func : function + function to be applied to each group and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_agg( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((num_groups, num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[i, j] = numba_func(group, group_index, *args) + return result + + return group_agg + + +@functools.cache +def generate_numba_transform_func( + func: Callable[..., np.ndarray], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted transform function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby transform function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. + + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_transform( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((len(values), num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[begin[i] : end[i], j] = numba_func(group, group_index, *args) + return result + + return group_transform diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/groupby/ops.py b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e2ddf9aa5c0c1752e1de9b81e2e72873db050e85 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/groupby/ops.py @@ -0,0 +1,1208 @@ +""" +Provide classes to perform the groupby aggregate operations. + +These are not exposed to the user and provide implementations of the grouping +operations, primarily in cython. These classes (BaseGrouper and BinGrouper) +are contained *in* the SeriesGroupBy and DataFrameGroupBy objects. 
+""" +from __future__ import annotations + +import collections +import functools +from typing import ( + TYPE_CHECKING, + Callable, + Generic, + final, +) + +import numpy as np + +from pandas._libs import ( + NaT, + lib, +) +import pandas._libs.groupby as libgroupby +from pandas._typing import ( + ArrayLike, + AxisInt, + NDFrameT, + Shape, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.cast import ( + maybe_cast_pointwise_result, + maybe_downcast_to_dtype, +) +from pandas.core.dtypes.common import ( + ensure_float64, + ensure_int64, + ensure_platform_int, + ensure_uint64, + is_1d_only_ea_dtype, +) +from pandas.core.dtypes.missing import ( + isna, + maybe_fill, +) + +from pandas.core.frame import DataFrame +from pandas.core.groupby import grouper +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, + ensure_index, +) +from pandas.core.series import Series +from pandas.core.sorting import ( + compress_group_index, + decons_obs_group_ids, + get_flattened_list, + get_group_index, + get_group_index_sorter, + get_indexer_dict, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sequence, + ) + + from pandas.core.generic import NDFrame + + +def check_result_array(obj, dtype) -> None: + # Our operation is supposed to be an aggregation/reduction. If + # it returns an ndarray, this likely means an invalid operation has + # been passed. See test_apply_without_aggregation, test_agg_must_agg + if isinstance(obj, np.ndarray): + if dtype != object: + # If it is object dtype, the function can be a reduction/aggregation + # and still return an ndarray e.g. test_agg_over_numpy_arrays + raise ValueError("Must produce aggregated value") + + +def extract_result(res): + """ + Extract the result object, it might be a 0-dim ndarray + or a len-1 0-dim, or a scalar + """ + if hasattr(res, "_values"): + # Preserve EA + res = res._values + if res.ndim == 1 and len(res) == 1: + # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply + res = res[0] + return res + + +class WrappedCythonOp: + """ + Dispatch logic for functions defined in _libs.groupby + + Parameters + ---------- + kind: str + Whether the operation is an aggregate or transform. + how: str + Operation name, e.g. "mean". + has_dropped_na: bool + True precisely when dropna=True and the grouper contains a null value. + """ + + # Functions for which we do _not_ attempt to cast the cython result + # back to the original dtype. 
+ cast_blocklist = frozenset( + ["any", "all", "rank", "count", "size", "idxmin", "idxmax"] + ) + + def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: + self.kind = kind + self.how = how + self.has_dropped_na = has_dropped_na + + _CYTHON_FUNCTIONS: dict[str, dict] = { + "aggregate": { + "any": functools.partial(libgroupby.group_any_all, val_test="any"), + "all": functools.partial(libgroupby.group_any_all, val_test="all"), + "sum": "group_sum", + "prod": "group_prod", + "idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"), + "idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"), + "min": "group_min", + "max": "group_max", + "mean": "group_mean", + "median": "group_median_float64", + "var": "group_var", + "std": functools.partial(libgroupby.group_var, name="std"), + "sem": functools.partial(libgroupby.group_var, name="sem"), + "skew": "group_skew", + "first": "group_nth", + "last": "group_last", + "ohlc": "group_ohlc", + }, + "transform": { + "cumprod": "group_cumprod", + "cumsum": "group_cumsum", + "cummin": "group_cummin", + "cummax": "group_cummax", + "rank": "group_rank", + }, + } + + _cython_arity = {"ohlc": 4} # OHLC + + @classmethod + def get_kind_from_how(cls, how: str) -> str: + if how in cls._CYTHON_FUNCTIONS["aggregate"]: + return "aggregate" + return "transform" + + # Note: we make this a classmethod and pass kind+how so that caching + # works at the class level and not the instance level + @classmethod + @functools.cache + def _get_cython_function( + cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool + ): + dtype_str = dtype.name + ftype = cls._CYTHON_FUNCTIONS[kind][how] + + # see if there is a fused-type version of function + # only valid for numeric + if callable(ftype): + f = ftype + else: + f = getattr(libgroupby, ftype) + if is_numeric: + return f + elif dtype == np.dtype(object): + if how in ["median", "cumprod"]: + # no fused types -> no __signatures__ + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + elif how in ["std", "sem", "idxmin", "idxmax"]: + # We have a partial object that does not have __signatures__ + return f + elif how == "skew": + # _get_cython_vals will convert to float64 + pass + elif "object" not in f.__signatures__: + # raise NotImplementedError here rather than TypeError later + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + return f + else: + raise NotImplementedError( + "This should not be reached. Please report a bug at " + "github.com/pandas-dev/pandas/", + dtype, + ) + + def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: + """ + Cast numeric dtypes to float64 for functions that only support that. 
+ + Parameters + ---------- + values : np.ndarray + + Returns + ------- + values : np.ndarray + """ + how = self.how + + if how in ["median", "std", "sem", "skew"]: + # median only has a float64 implementation + # We should only get here with is_numeric, as non-numeric cases + # should raise in _get_cython_function + values = ensure_float64(values) + + elif values.dtype.kind in "iu": + if how in ["var", "mean"] or ( + self.kind == "transform" and self.has_dropped_na + ): + # has_dropped_na check need for test_null_group_str_transformer + # result may still include NaN, so we have to cast + values = ensure_float64(values) + + elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]: + # Avoid overflow during group op + if values.dtype.kind == "i": + values = ensure_int64(values) + else: + values = ensure_uint64(values) + + return values + + def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape: + how = self.how + kind = self.kind + + arity = self._cython_arity.get(how, 1) + + out_shape: Shape + if how == "ohlc": + out_shape = (ngroups, arity) + elif arity > 1: + raise NotImplementedError( + "arity of more than 1 is not supported for the 'how' argument" + ) + elif kind == "transform": + out_shape = values.shape + else: + out_shape = (ngroups,) + values.shape[1:] + return out_shape + + def _get_out_dtype(self, dtype: np.dtype) -> np.dtype: + how = self.how + + if how == "rank": + out_dtype = "float64" + elif how in ["idxmin", "idxmax"]: + # The Cython implementation only produces the row number; we'll take + # from the index using this in post processing + out_dtype = "intp" + else: + if dtype.kind in "iufcb": + out_dtype = f"{dtype.kind}{dtype.itemsize}" + else: + out_dtype = "object" + return np.dtype(out_dtype) + + def _get_result_dtype(self, dtype: np.dtype) -> np.dtype: + """ + Get the desired dtype of a result based on the + input dtype and how it was computed. + + Parameters + ---------- + dtype : np.dtype + + Returns + ------- + np.dtype + The desired dtype of the result. 
+ """ + how = self.how + + if how in ["sum", "cumsum", "sum", "prod", "cumprod"]: + if dtype == np.dtype(bool): + return np.dtype(np.int64) + elif how in ["mean", "median", "var", "std", "sem"]: + if dtype.kind in "fc": + return dtype + elif dtype.kind in "iub": + return np.dtype(np.float64) + return dtype + + @final + def _cython_op_ndim_compat( + self, + values: np.ndarray, + *, + min_count: int, + ngroups: int, + comp_ids: np.ndarray, + mask: npt.NDArray[np.bool_] | None = None, + result_mask: npt.NDArray[np.bool_] | None = None, + **kwargs, + ) -> np.ndarray: + if values.ndim == 1: + # expand to 2d, dispatch, then squeeze if appropriate + values2d = values[None, :] + if mask is not None: + mask = mask[None, :] + if result_mask is not None: + result_mask = result_mask[None, :] + res = self._call_cython_op( + values2d, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + if res.shape[0] == 1: + return res[0] + + # otherwise we have OHLC + return res.T + + return self._call_cython_op( + values, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + + @final + def _call_cython_op( + self, + values: np.ndarray, # np.ndarray[ndim=2] + *, + min_count: int, + ngroups: int, + comp_ids: np.ndarray, + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None, + **kwargs, + ) -> np.ndarray: # np.ndarray[ndim=2] + orig_values = values + + dtype = values.dtype + is_numeric = dtype.kind in "iufcb" + + is_datetimelike = dtype.kind in "mM" + + if is_datetimelike: + values = values.view("int64") + is_numeric = True + elif dtype.kind == "b": + values = values.view("uint8") + if values.dtype == "float16": + values = values.astype(np.float32) + + if self.how in ["any", "all"]: + if mask is None: + mask = isna(values) + if dtype == object: + if kwargs["skipna"]: + # GH#37501: don't raise on pd.NA when skipna=True + if mask.any(): + # mask on original values computed separately + values = values.copy() + values[mask] = True + values = values.astype(bool, copy=False).view(np.int8) + is_numeric = True + + values = values.T + if mask is not None: + mask = mask.T + if result_mask is not None: + result_mask = result_mask.T + + out_shape = self._get_output_shape(ngroups, values) + func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric) + values = self._get_cython_vals(values) + out_dtype = self._get_out_dtype(values.dtype) + + result = maybe_fill(np.empty(out_shape, dtype=out_dtype)) + if self.kind == "aggregate": + counts = np.zeros(ngroups, dtype=np.int64) + if self.how in [ + "idxmin", + "idxmax", + "min", + "max", + "mean", + "last", + "first", + "sum", + ]: + func( + out=result, + counts=counts, + values=values, + labels=comp_ids, + min_count=min_count, + mask=mask, + result_mask=result_mask, + is_datetimelike=is_datetimelike, + **kwargs, + ) + elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]: + if self.how in ["std", "sem"]: + kwargs["is_datetimelike"] = is_datetimelike + func( + result, + counts, + values, + comp_ids, + min_count=min_count, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + elif self.how in ["any", "all"]: + func( + out=result, + values=values, + labels=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + result = result.astype(bool, copy=False) + elif self.how in ["skew"]: + func( + out=result, + counts=counts, + values=values, + labels=comp_ids, + mask=mask, + 
result_mask=result_mask, + **kwargs, + ) + if dtype == object: + result = result.astype(object) + + else: + raise NotImplementedError(f"{self.how} is not implemented") + else: + # TODO: min_count + if self.how != "rank": + # TODO: should rank take result_mask? + kwargs["result_mask"] = result_mask + func( + out=result, + values=values, + labels=comp_ids, + ngroups=ngroups, + is_datetimelike=is_datetimelike, + mask=mask, + **kwargs, + ) + + if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]: + # i.e. counts is defined. Locations where count None: + if values.ndim > 2: + raise NotImplementedError("number of dimensions is currently limited to 2") + if values.ndim == 2: + assert axis == 1, axis + elif not is_1d_only_ea_dtype(values.dtype): + # Note: it is *not* the case that axis is always 0 for 1-dim values, + # as we can have 1D ExtensionArrays that we need to treat as 2D + assert axis == 0 + + @final + def cython_operation( + self, + *, + values: ArrayLike, + axis: AxisInt, + min_count: int = -1, + comp_ids: np.ndarray, + ngroups: int, + **kwargs, + ) -> ArrayLike: + """ + Call our cython function, with appropriate pre- and post- processing. + """ + self._validate_axis(axis, values) + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + return values._groupby_op( + how=self.how, + has_dropped_na=self.has_dropped_na, + min_count=min_count, + ngroups=ngroups, + ids=comp_ids, + **kwargs, + ) + + return self._cython_op_ndim_compat( + values, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=None, + **kwargs, + ) + + +class BaseGrouper: + """ + This is an internal Grouper class, which actually holds + the generated groups + + Parameters + ---------- + axis : Index + groupings : Sequence[Grouping] + all the grouping instances to handle in this grouper + for example for grouper list to groupby, need to pass the list + sort : bool, default True + whether this grouper will give sorted result or not + + """ + + axis: Index + + def __init__( + self, + axis: Index, + groupings: Sequence[grouper.Grouping], + sort: bool = True, + dropna: bool = True, + ) -> None: + assert isinstance(axis, Index), axis + + self.axis = axis + self._groupings: list[grouper.Grouping] = list(groupings) + self._sort = sort + self.dropna = dropna + + @property + def groupings(self) -> list[grouper.Grouping]: + return self._groupings + + @property + def shape(self) -> Shape: + return tuple(ping.ngroups for ping in self.groupings) + + def __iter__(self) -> Iterator[Hashable]: + return iter(self.indices) + + @property + def nkeys(self) -> int: + return len(self.groupings) + + def get_iterator( + self, data: NDFrameT, axis: AxisInt = 0 + ) -> Iterator[tuple[Hashable, NDFrameT]]: + """ + Groupby iterator + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + """ + splitter = self._get_splitter(data, axis=axis) + keys = self.group_keys_seq + yield from zip(keys, splitter) + + @final + def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter: + """ + Returns + ------- + Generator yielding subsetted objects + """ + ids, _, ngroups = self.group_info + return _get_splitter( + data, + ids, + ngroups, + sorted_ids=self._sorted_ids, + sort_idx=self._sort_idx, + axis=axis, + ) + + @final + @cache_readonly + def group_keys_seq(self): + if len(self.groupings) == 1: + return self.levels[0] + else: + ids, _, ngroups = self.group_info + + # provide "flattened" iterator for multi-group setting + return get_flattened_list(ids, ngroups, 
self.levels, self.codes) + + @cache_readonly + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + """dict {group name -> group indices}""" + if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex): + # This shows unused categories in indices GH#38642 + return self.groupings[0].indices + codes_list = [ping.codes for ping in self.groupings] + keys = [ping._group_index for ping in self.groupings] + return get_indexer_dict(codes_list, keys) + + @final + def result_ilocs(self) -> npt.NDArray[np.intp]: + """ + Get the original integer locations of result_index in the input. + """ + # Original indices are where group_index would go via sorting. + # But when dropna is true, we need to remove null values while accounting for + # any gaps that then occur because of them. + group_index = get_group_index( + self.codes, self.shape, sort=self._sort, xnull=True + ) + group_index, _ = compress_group_index(group_index, sort=self._sort) + + if self.has_dropped_na: + mask = np.where(group_index >= 0) + # Count how many gaps are caused by previous null values for each position + null_gaps = np.cumsum(group_index == -1)[mask] + group_index = group_index[mask] + + result = get_group_index_sorter(group_index, self.ngroups) + + if self.has_dropped_na: + # Shift by the number of prior null gaps + result += np.take(null_gaps, result) + + return result + + @final + @property + def codes(self) -> list[npt.NDArray[np.signedinteger]]: + return [ping.codes for ping in self.groupings] + + @property + def levels(self) -> list[Index]: + return [ping._group_index for ping in self.groupings] + + @property + def names(self) -> list[Hashable]: + return [ping.name for ping in self.groupings] + + @final + def size(self) -> Series: + """ + Compute group sizes. + """ + ids, _, ngroups = self.group_info + out: np.ndarray | list + if ngroups: + out = np.bincount(ids[ids != -1], minlength=ngroups) + else: + out = [] + return Series(out, index=self.result_index, dtype="int64", copy=False) + + @cache_readonly + def groups(self) -> dict[Hashable, np.ndarray]: + """dict {group name -> group labels}""" + if len(self.groupings) == 1: + return self.groupings[0].groups + else: + to_groupby = [] + for ping in self.groupings: + gv = ping.grouping_vector + if not isinstance(gv, BaseGrouper): + to_groupby.append(gv) + else: + to_groupby.append(gv.groupings[0].grouping_vector) + index = MultiIndex.from_arrays(to_groupby) + return self.axis.groupby(index) + + @final + @cache_readonly + def is_monotonic(self) -> bool: + # return if my group orderings are monotonic + return Index(self.group_info[0]).is_monotonic_increasing + + @final + @cache_readonly + def has_dropped_na(self) -> bool: + """ + Whether grouper has null value(s) that are dropped. 
+ """ + return bool((self.group_info[0] < 0).any()) + + @cache_readonly + def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + comp_ids, obs_group_ids = self._get_compressed_codes() + + ngroups = len(obs_group_ids) + comp_ids = ensure_platform_int(comp_ids) + + return comp_ids, obs_group_ids, ngroups + + @cache_readonly + def codes_info(self) -> npt.NDArray[np.intp]: + # return the codes of items in original grouped axis + ids, _, _ = self.group_info + return ids + + @final + def _get_compressed_codes( + self, + ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]: + # The first returned ndarray may have any signed integer dtype + if len(self.groupings) > 1: + group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True) + return compress_group_index(group_index, sort=self._sort) + # FIXME: compress_group_index's second return value is int64, not intp + + ping = self.groupings[0] + return ping.codes, np.arange(len(ping._group_index), dtype=np.intp) + + @final + @cache_readonly + def ngroups(self) -> int: + return len(self.result_index) + + @property + def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]: + codes = self.codes + ids, obs_ids, _ = self.group_info + return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True) + + @cache_readonly + def result_index(self) -> Index: + if len(self.groupings) == 1: + return self.groupings[0]._result_index.rename(self.names[0]) + + codes = self.reconstructed_codes + levels = [ping._result_index for ping in self.groupings] + return MultiIndex( + levels=levels, codes=codes, verify_integrity=False, names=self.names + ) + + @final + def get_group_levels(self) -> list[ArrayLike]: + # Note: only called from _insert_inaxis_grouper, which + # is only called for BaseGrouper, never for BinGrouper + if len(self.groupings) == 1: + return [self.groupings[0]._group_arraylike] + + name_list = [] + for ping, codes in zip(self.groupings, self.reconstructed_codes): + codes = ensure_platform_int(codes) + levels = ping._group_arraylike.take(codes) + + name_list.append(levels) + + return name_list + + # ------------------------------------------------------------ + # Aggregation functions + + @final + def _cython_operation( + self, + kind: str, + values, + how: str, + axis: AxisInt, + min_count: int = -1, + **kwargs, + ) -> ArrayLike: + """ + Returns the values of a cython operation. + """ + assert kind in ["transform", "aggregate"] + + cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na) + + ids, _, _ = self.group_info + ngroups = self.ngroups + return cy_op.cython_operation( + values=values, + axis=axis, + min_count=min_count, + comp_ids=ids, + ngroups=ngroups, + **kwargs, + ) + + @final + def agg_series( + self, obj: Series, func: Callable, preserve_dtype: bool = False + ) -> ArrayLike: + """ + Parameters + ---------- + obj : Series + func : function taking a Series and returning a scalar-like + preserve_dtype : bool + Whether the aggregation is known to be dtype-preserving. + + Returns + ------- + np.ndarray or ExtensionArray + """ + + if not isinstance(obj._values, np.ndarray): + # we can preserve a little bit more aggressively with EA dtype + # because maybe_cast_pointwise_result will do a try/except + # with _from_sequence. NB we are assuming here that _from_sequence + # is sufficiently strict that it casts appropriately. 
+ preserve_dtype = True + + result = self._aggregate_series_pure_python(obj, func) + + npvalues = lib.maybe_convert_objects(result, try_float=False) + if preserve_dtype: + out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True) + else: + out = npvalues + return out + + @final + def _aggregate_series_pure_python( + self, obj: Series, func: Callable + ) -> npt.NDArray[np.object_]: + _, _, ngroups = self.group_info + + result = np.empty(ngroups, dtype="O") + initialized = False + + splitter = self._get_splitter(obj, axis=0) + + for i, group in enumerate(splitter): + res = func(group) + res = extract_result(res) + + if not initialized: + # We only do this validation on the first iteration + check_result_array(res, group.dtype) + initialized = True + + result[i] = res + + return result + + @final + def apply_groupwise( + self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0 + ) -> tuple[list, bool]: + mutated = False + splitter = self._get_splitter(data, axis=axis) + group_keys = self.group_keys_seq + result_values = [] + + # This calls DataSplitter.__iter__ + zipped = zip(group_keys, splitter) + + for key, group in zipped: + # Pinning name is needed for + # test_group_apply_once_per_group, + # test_inconsistent_return_type, test_set_group_name, + # test_group_name_available_in_inference_pass, + # test_groupby_multi_timezone + object.__setattr__(group, "name", key) + + # group might be modified + group_axes = group.axes + res = f(group) + if not mutated and not _is_indexed_like(res, group_axes, axis): + mutated = True + result_values.append(res) + # getattr pattern for __name__ is needed for functools.partial objects + if len(group_keys) == 0 and getattr(f, "__name__", None) in [ + "skew", + "sum", + "prod", + ]: + # If group_keys is empty, then no function calls have been made, + # so we will not have raised even if this is an invalid dtype. + # So do one dummy call here to raise appropriate TypeError. 
+ f(data.iloc[:0]) + + return result_values, mutated + + # ------------------------------------------------------------ + # Methods for sorting subsets of our GroupBy's object + + @final + @cache_readonly + def _sort_idx(self) -> npt.NDArray[np.intp]: + # Counting sort indexer + ids, _, ngroups = self.group_info + return get_group_index_sorter(ids, ngroups) + + @final + @cache_readonly + def _sorted_ids(self) -> npt.NDArray[np.intp]: + ids, _, _ = self.group_info + return ids.take(self._sort_idx) + + +class BinGrouper(BaseGrouper): + """ + This is an internal Grouper class + + Parameters + ---------- + bins : the split index of binlabels to group the item of axis + binlabels : the label list + indexer : np.ndarray[np.intp], optional + the indexer created by Grouper + some groupers (TimeGrouper) will sort its axis and its + group_info is also sorted, so need the indexer to reorder + + Examples + -------- + bins: [2, 4, 6, 8, 10] + binlabels: DatetimeIndex(['2005-01-01', '2005-01-03', + '2005-01-05', '2005-01-07', '2005-01-09'], + dtype='datetime64[ns]', freq='2D') + + the group_info, which contains the label of each item in grouped + axis, the index of label in label list, group number, is + + (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5) + + means that, the grouped axis has 10 items, can be grouped into 5 + labels, the first and second items belong to the first label, the + third and forth items belong to the second label, and so on + + """ + + bins: npt.NDArray[np.int64] + binlabels: Index + + def __init__( + self, + bins, + binlabels, + indexer=None, + ) -> None: + self.bins = ensure_int64(bins) + self.binlabels = ensure_index(binlabels) + self.indexer = indexer + + # These lengths must match, otherwise we could call agg_series + # with empty self.bins, which would raise later. 
+ assert len(self.binlabels) == len(self.bins) + + @cache_readonly + def groups(self): + """dict {group name -> group labels}""" + # this is mainly for compat + # GH 3881 + result = { + key: value + for key, value in zip(self.binlabels, self.bins) + if key is not NaT + } + return result + + @property + def nkeys(self) -> int: + # still matches len(self.groupings), but we can hard-code + return 1 + + @cache_readonly + def codes_info(self) -> npt.NDArray[np.intp]: + # return the codes of items in original grouped axis + ids, _, _ = self.group_info + if self.indexer is not None: + sorter = np.lexsort((ids, self.indexer)) + ids = ids[sorter] + return ids + + def get_iterator(self, data: NDFrame, axis: AxisInt = 0): + """ + Groupby iterator + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + """ + if axis == 0: + slicer = lambda start, edge: data.iloc[start:edge] + else: + slicer = lambda start, edge: data.iloc[:, start:edge] + + length = len(data.axes[axis]) + + start = 0 + for edge, label in zip(self.bins, self.binlabels): + if label is not NaT: + yield label, slicer(start, edge) + start = edge + + if start < length: + yield self.binlabels[-1], slicer(start, None) + + @cache_readonly + def indices(self): + indices = collections.defaultdict(list) + + i = 0 + for label, bin in zip(self.binlabels, self.bins): + if i < bin: + if label is not NaT: + indices[label] = list(range(i, bin)) + i = bin + return indices + + @cache_readonly + def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + ngroups = self.ngroups + obs_group_ids = np.arange(ngroups, dtype=np.intp) + rep = np.diff(np.r_[0, self.bins]) + + rep = ensure_platform_int(rep) + if ngroups == len(self.bins): + comp_ids = np.repeat(np.arange(ngroups), rep) + else: + comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) + + return ( + ensure_platform_int(comp_ids), + obs_group_ids, + ngroups, + ) + + @cache_readonly + def reconstructed_codes(self) -> list[np.ndarray]: + # get unique result indices, and prepend 0 as groupby starts from the first + return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]] + + @cache_readonly + def result_index(self) -> Index: + if len(self.binlabels) != 0 and isna(self.binlabels[0]): + return self.binlabels[1:] + + return self.binlabels + + @property + def levels(self) -> list[Index]: + return [self.binlabels] + + @property + def names(self) -> list[Hashable]: + return [self.binlabels.name] + + @property + def groupings(self) -> list[grouper.Grouping]: + lev = self.binlabels + codes = self.group_info[0] + labels = lev.take(codes) + ping = grouper.Grouping( + labels, labels, in_axis=False, level=None, uniques=lev._values + ) + return [ping] + + +def _is_indexed_like(obj, axes, axis: AxisInt) -> bool: + if isinstance(obj, Series): + if len(axes) > 1: + return False + return obj.axes[axis].equals(axes[axis]) + elif isinstance(obj, DataFrame): + return obj.axes[axis].equals(axes[axis]) + + return False + + +# ---------------------------------------------------------------------- +# Splitting / application + + +class DataSplitter(Generic[NDFrameT]): + def __init__( + self, + data: NDFrameT, + labels: npt.NDArray[np.intp], + ngroups: int, + *, + sort_idx: npt.NDArray[np.intp], + sorted_ids: npt.NDArray[np.intp], + axis: AxisInt = 0, + ) -> None: + self.data = data + self.labels = ensure_platform_int(labels) # _should_ already be np.intp + self.ngroups = ngroups + + self._slabels = sorted_ids + self._sort_idx = sort_idx + + 
self.axis = axis + assert isinstance(axis, int), axis + + def __iter__(self) -> Iterator: + sdata = self._sorted_data + + if self.ngroups == 0: + # we are inside a generator, rather than raise StopIteration + # we merely return signal the end + return + + starts, ends = lib.generate_slices(self._slabels, self.ngroups) + + for start, end in zip(starts, ends): + yield self._chop(sdata, slice(start, end)) + + @cache_readonly + def _sorted_data(self) -> NDFrameT: + return self.data.take(self._sort_idx, axis=self.axis) + + def _chop(self, sdata, slice_obj: slice) -> NDFrame: + raise AbstractMethodError(self) + + +class SeriesSplitter(DataSplitter): + def _chop(self, sdata: Series, slice_obj: slice) -> Series: + # fastpath equivalent to `sdata.iloc[slice_obj]` + mgr = sdata._mgr.get_slice(slice_obj) + ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes) + ser._name = sdata.name + return ser.__finalize__(sdata, method="groupby") + + +class FrameSplitter(DataSplitter): + def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame: + # Fastpath equivalent to: + # if self.axis == 0: + # return sdata.iloc[slice_obj] + # else: + # return sdata.iloc[:, slice_obj] + mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis) + df = sdata._constructor_from_mgr(mgr, axes=mgr.axes) + return df.__finalize__(sdata, method="groupby") + + +def _get_splitter( + data: NDFrame, + labels: npt.NDArray[np.intp], + ngroups: int, + *, + sort_idx: npt.NDArray[np.intp], + sorted_ids: npt.NDArray[np.intp], + axis: AxisInt = 0, +) -> DataSplitter: + if isinstance(data, Series): + klass: type[DataSplitter] = SeriesSplitter + else: + # i.e. DataFrame + klass = FrameSplitter + + return klass( + data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis + ) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__init__.py b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba8a4f1d0ee7adb668c6b0ac49b2360d3c0dc356 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__init__.py @@ -0,0 +1,31 @@ +from pandas.core.indexers.utils import ( + check_array_indexer, + check_key_length, + check_setitem_lengths, + disallow_ndim_indexing, + is_empty_indexer, + is_list_like_indexer, + is_scalar_indexer, + is_valid_positional_slice, + length_of_indexer, + maybe_convert_indices, + unpack_1tuple, + unpack_tuple_and_ellipses, + validate_indices, +) + +__all__ = [ + "is_valid_positional_slice", + "is_list_like_indexer", + "is_scalar_indexer", + "is_empty_indexer", + "check_setitem_lengths", + "validate_indices", + "maybe_convert_indices", + "length_of_indexer", + "disallow_ndim_indexing", + "unpack_1tuple", + "check_key_length", + "check_array_indexer", + "unpack_tuple_and_ellipses", +] diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d4667b1e82d831abed03ac26585bac528625b14 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f211a986754882eca679d8fba23c0117ee3a8c49 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0967f9c7d5b8cb175ee6e15438ab3860764a557 Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/indexers/objects.py b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/objects.py new file mode 100644 index 0000000000000000000000000000000000000000..f2db4886a559017422ed41bb8bd2246d6a3f0fb0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/objects.py @@ -0,0 +1,453 @@ +"""Indexer objects for computing start/end window bounds for rolling operations""" +from __future__ import annotations + +from datetime import timedelta + +import numpy as np + +from pandas._libs.tslibs import BaseOffset +from pandas._libs.window.indexers import calculate_variable_window_bounds +from pandas.util._decorators import Appender + +from pandas.core.dtypes.common import ensure_platform_int + +from pandas.core.indexes.datetimes import DatetimeIndex + +from pandas.tseries.offsets import Nano + +get_window_bounds_doc = """ +Computes the bounds of a window. + +Parameters +---------- +num_values : int, default 0 + number of values that will be aggregated over +window_size : int, default 0 + the number of rows in a window +min_periods : int, default None + min_periods passed from the top level rolling API +center : bool, default None + center passed from the top level rolling API +closed : str, default None + closed passed from the top level rolling API +step : int, default None + step passed from the top level rolling API + .. versionadded:: 1.5 +win_type : str, default None + win_type passed from the top level rolling API + +Returns +------- +A tuple of ndarray[int64]s, indicating the boundaries of each +window +""" + + +class BaseIndexer: + """ + Base class for window bounds calculations. + + Examples + -------- + >>> from pandas.api.indexers import BaseIndexer + >>> class CustomIndexer(BaseIndexer): + ... def get_window_bounds(self, num_values, min_periods, center, closed, step): + ... start = np.empty(num_values, dtype=np.int64) + ... end = np.empty(num_values, dtype=np.int64) + ... for i in range(num_values): + ... start[i] = i + ... end[i] = i + self.window_size + ... 
return start, end + >>> df = pd.DataFrame({"values": range(5)}) + >>> indexer = CustomIndexer(window_size=2) + >>> df.rolling(indexer).sum() + values + 0 1.0 + 1 3.0 + 2 5.0 + 3 7.0 + 4 4.0 + """ + + def __init__( + self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs + ) -> None: + self.index_array = index_array + self.window_size = window_size + # Set user defined kwargs as attributes that can be used in get_window_bounds + for key, value in kwargs.items(): + setattr(self, key, value) + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + raise NotImplementedError + + +class FixedWindowIndexer(BaseIndexer): + """Creates window boundaries that are of fixed length.""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if center or self.window_size == 0: + offset = (self.window_size - 1) // 2 + else: + offset = 0 + + end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64") + start = end - self.window_size + if closed in ["left", "both"]: + start -= 1 + if closed in ["left", "neither"]: + end -= 1 + + end = np.clip(end, 0, num_values) + start = np.clip(start, 0, num_values) + + return start, end + + +class VariableWindowIndexer(BaseIndexer): + """Creates window boundaries that are of variable length, namely for time series.""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + # error: Argument 4 to "calculate_variable_window_bounds" has incompatible + # type "Optional[bool]"; expected "bool" + # error: Argument 6 to "calculate_variable_window_bounds" has incompatible + # type "Optional[ndarray]"; expected "ndarray" + return calculate_variable_window_bounds( + num_values, + self.window_size, + min_periods, + center, # type: ignore[arg-type] + closed, + self.index_array, # type: ignore[arg-type] + ) + + +class VariableOffsetWindowIndexer(BaseIndexer): + """ + Calculate window boundaries based on a non-fixed offset such as a BusinessDay. 
+ + Examples + -------- + >>> from pandas.api.indexers import VariableOffsetWindowIndexer + >>> df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10)) + >>> offset = pd.offsets.BDay(1) + >>> indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset) + >>> df + 0 + 2020-01-01 0 + 2020-01-02 1 + 2020-01-03 2 + 2020-01-04 3 + 2020-01-05 4 + 2020-01-06 5 + 2020-01-07 6 + 2020-01-08 7 + 2020-01-09 8 + 2020-01-10 9 + >>> df.rolling(indexer).sum() + 0 + 2020-01-01 0.0 + 2020-01-02 1.0 + 2020-01-03 2.0 + 2020-01-04 3.0 + 2020-01-05 7.0 + 2020-01-06 12.0 + 2020-01-07 6.0 + 2020-01-08 7.0 + 2020-01-09 8.0 + 2020-01-10 9.0 + """ + + def __init__( + self, + index_array: np.ndarray | None = None, + window_size: int = 0, + index: DatetimeIndex | None = None, + offset: BaseOffset | None = None, + **kwargs, + ) -> None: + super().__init__(index_array, window_size, **kwargs) + if not isinstance(index, DatetimeIndex): + raise ValueError("index must be a DatetimeIndex.") + self.index = index + if not isinstance(offset, BaseOffset): + raise ValueError("offset must be a DateOffset-like object.") + self.offset = offset + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if step is not None: + raise NotImplementedError("step not implemented for variable offset window") + if num_values <= 0: + return np.empty(0, dtype="int64"), np.empty(0, dtype="int64") + + # if windows is variable, default is 'right', otherwise default is 'both' + if closed is None: + closed = "right" if self.index is not None else "both" + + right_closed = closed in ["right", "both"] + left_closed = closed in ["left", "both"] + + if self.index[num_values - 1] < self.index[0]: + index_growth_sign = -1 + else: + index_growth_sign = 1 + offset_diff = index_growth_sign * self.offset + + start = np.empty(num_values, dtype="int64") + start.fill(-1) + end = np.empty(num_values, dtype="int64") + end.fill(-1) + + start[0] = 0 + + # right endpoint is closed + if right_closed: + end[0] = 1 + # right endpoint is open + else: + end[0] = 0 + + zero = timedelta(0) + # start is start of slice interval (including) + # end is end of slice interval (not including) + for i in range(1, num_values): + end_bound = self.index[i] + start_bound = end_bound - offset_diff + + # left endpoint is closed + if left_closed: + start_bound -= Nano(1) + + # advance the start bound until we are + # within the constraint + start[i] = i + for j in range(start[i - 1], i): + start_diff = (self.index[j] - start_bound) * index_growth_sign + if start_diff > zero: + start[i] = j + break + + # end bound is previous end + # or current index + end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign + if end_diff == zero and not right_closed: + end[i] = end[i - 1] + 1 + elif end_diff <= zero: + end[i] = i + 1 + else: + end[i] = end[i - 1] + + # right endpoint is open + if not right_closed: + end[i] -= 1 + + return start, end + + +class ExpandingIndexer(BaseIndexer): + """Calculate expanding window bounds, mimicking df.expanding()""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + return ( + np.zeros(num_values, dtype=np.int64), + np.arange(1, num_values + 
1, dtype=np.int64), + ) + + +class FixedForwardWindowIndexer(BaseIndexer): + """ + Creates window boundaries for fixed-length windows that include the current row. + + Examples + -------- + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2) + >>> df.rolling(window=indexer, min_periods=1).sum() + B + 0 1.0 + 1 3.0 + 2 2.0 + 3 4.0 + 4 4.0 + """ + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if center: + raise ValueError("Forward-looking windows can't have center=True") + if closed is not None: + raise ValueError( + "Forward-looking windows don't support setting the closed argument" + ) + if step is None: + step = 1 + + start = np.arange(0, num_values, step, dtype="int64") + end = start + self.window_size + if self.window_size: + end = np.clip(end, 0, num_values) + + return start, end + + +class GroupbyIndexer(BaseIndexer): + """Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()""" + + def __init__( + self, + index_array: np.ndarray | None = None, + window_size: int | BaseIndexer = 0, + groupby_indices: dict | None = None, + window_indexer: type[BaseIndexer] = BaseIndexer, + indexer_kwargs: dict | None = None, + **kwargs, + ) -> None: + """ + Parameters + ---------- + index_array : np.ndarray or None + np.ndarray of the index of the original object that we are performing + a chained groupby operation over. This index has been pre-sorted relative to + the groups + window_size : int or BaseIndexer + window size during the windowing operation + groupby_indices : dict or None + dict of {group label: [positional index of rows belonging to the group]} + window_indexer : BaseIndexer + BaseIndexer class determining the start and end bounds of each group + indexer_kwargs : dict or None + Custom kwargs to be passed to window_indexer + **kwargs : + keyword arguments that will be available when get_window_bounds is called + """ + self.groupby_indices = groupby_indices or {} + self.window_indexer = window_indexer + self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {} + super().__init__( + index_array=index_array, + window_size=self.indexer_kwargs.pop("window_size", window_size), + **kwargs, + ) + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + # 1) For each group, get the indices that belong to the group + # 2) Use the indices to calculate the start & end bounds of the window + # 3) Append the window bounds in group order + start_arrays = [] + end_arrays = [] + window_indices_start = 0 + for key, indices in self.groupby_indices.items(): + index_array: np.ndarray | None + + if self.index_array is not None: + index_array = self.index_array.take(ensure_platform_int(indices)) + else: + index_array = self.index_array + indexer = self.window_indexer( + index_array=index_array, + window_size=self.window_size, + **self.indexer_kwargs, + ) + start, end = indexer.get_window_bounds( + len(indices), min_periods, center, closed, step + ) + start = start.astype(np.int64) + end = end.astype(np.int64) + assert len(start) == len( + end + ), "these 
should be equal in length from get_window_bounds" + # Cannot use groupby_indices as they might not be monotonic with the object + # we're rolling over + window_indices = np.arange( + window_indices_start, window_indices_start + len(indices) + ) + window_indices_start += len(indices) + # Extend as we'll be slicing window like [start, end) + window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype( + np.int64, copy=False + ) + start_arrays.append(window_indices.take(ensure_platform_int(start))) + end_arrays.append(window_indices.take(ensure_platform_int(end))) + if len(start_arrays) == 0: + return np.array([], dtype=np.int64), np.array([], dtype=np.int64) + start = np.concatenate(start_arrays) + end = np.concatenate(end_arrays) + return start, end + + +class ExponentialMovingWindowIndexer(BaseIndexer): + """Calculate ewm window bounds (the entire window)""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64) diff --git a/videollama2/lib/python3.10/site-packages/pandas/core/indexers/utils.py b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..55bb58f3108c3d7004058494284ea6fb4b2fca7f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/pandas/core/indexers/utils.py @@ -0,0 +1,553 @@ +""" +Low-dependency indexing utilities. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import ( + is_array_like, + is_bool_dtype, + is_integer, + is_integer_dtype, + is_list_like, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import AnyArrayLike + + from pandas.core.frame import DataFrame + from pandas.core.indexes.base import Index + +# ----------------------------------------------------------- +# Indexer Identification + + +def is_valid_positional_slice(slc: slice) -> bool: + """ + Check if a slice object can be interpreted as a positional indexer. + + Parameters + ---------- + slc : slice + + Returns + ------- + bool + + Notes + ----- + A valid positional slice may also be interpreted as a label-based slice + depending on the index being sliced. + """ + return ( + lib.is_int_or_none(slc.start) + and lib.is_int_or_none(slc.stop) + and lib.is_int_or_none(slc.step) + ) + + +def is_list_like_indexer(key) -> bool: + """ + Check if we have a list-like indexer that is *not* a NamedTuple. + + Parameters + ---------- + key : object + + Returns + ------- + bool + """ + # allow a list_like, but exclude NamedTuples which can be indexers + return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple) + + +def is_scalar_indexer(indexer, ndim: int) -> bool: + """ + Return True if we are all scalar indexers. + + Parameters + ---------- + indexer : object + ndim : int + Number of dimensions in the object being indexed. 
+ + Returns + ------- + bool + """ + if ndim == 1 and is_integer(indexer): + # GH37748: allow indexer to be an integer for Series + return True + if isinstance(indexer, tuple) and len(indexer) == ndim: + return all(is_integer(x) for x in indexer) + return False + + +def is_empty_indexer(indexer) -> bool: + """ + Check if we have an empty indexer. + + Parameters + ---------- + indexer : object + + Returns + ------- + bool + """ + if is_list_like(indexer) and not len(indexer): + return True + if not isinstance(indexer, tuple): + indexer = (indexer,) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) + + +# ----------------------------------------------------------- +# Indexer Validation + + +def check_setitem_lengths(indexer, value, values) -> bool: + """ + Validate that value and indexer are the same length. + + An special-case is allowed for when the indexer is a boolean array + and the number of true values equals the length of ``value``. In + this case, no exception is raised. + + Parameters + ---------- + indexer : sequence + Key for the setitem. + value : array-like + Value for the setitem. + values : array-like + Values being set into. + + Returns + ------- + bool + Whether this is an empty listlike setting which is a no-op. + + Raises + ------ + ValueError + When the indexer is an ndarray or list and the lengths don't match. + """ + no_op = False + + if isinstance(indexer, (np.ndarray, list)): + # We can ignore other listlikes because they are either + # a) not necessarily 1-D indexers, e.g. tuple + # b) boolean indexers e.g. BoolArray + if is_list_like(value): + if len(indexer) != len(value) and values.ndim == 1: + # boolean with truth values == len of the value is ok too + if isinstance(indexer, list): + indexer = np.array(indexer) + if not ( + isinstance(indexer, np.ndarray) + and indexer.dtype == np.bool_ + and indexer.sum() == len(value) + ): + raise ValueError( + "cannot set using a list-like indexer " + "with a different length than the value" + ) + if not len(indexer): + no_op = True + + elif isinstance(indexer, slice): + if is_list_like(value): + if len(value) != length_of_indexer(indexer, values) and values.ndim == 1: + # In case of two dimensional value is used row-wise and broadcasted + raise ValueError( + "cannot set using a slice indexer with a " + "different length than the value" + ) + if not len(value): + no_op = True + + return no_op + + +def validate_indices(indices: np.ndarray, n: int) -> None: + """ + Perform bounds-checking for an indexer. + + -1 is allowed for indicating missing values. + + Parameters + ---------- + indices : ndarray + n : int + Length of the array being indexed. + + Raises + ------ + ValueError + + Examples + -------- + >>> validate_indices(np.array([1, 2]), 3) # OK + + >>> validate_indices(np.array([1, -2]), 3) + Traceback (most recent call last): + ... + ValueError: negative dimensions are not allowed + + >>> validate_indices(np.array([1, 2, 3]), 3) + Traceback (most recent call last): + ... + IndexError: indices are out-of-bounds + + >>> validate_indices(np.array([-1, -1]), 0) # OK + + >>> validate_indices(np.array([0, 1]), 0) + Traceback (most recent call last): + ... 
+ IndexError: indices are out-of-bounds + """ + if len(indices): + min_idx = indices.min() + if min_idx < -1: + msg = f"'indices' contains values less than allowed ({min_idx} < -1)" + raise ValueError(msg) + + max_idx = indices.max() + if max_idx >= n: + raise IndexError("indices are out-of-bounds") + + +# ----------------------------------------------------------- +# Indexer Conversion + + +def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray: + """ + Attempt to convert indices into valid, positive indices. + + If we have negative indices, translate to positive here. + If we have indices that are out-of-bounds, raise an IndexError. + + Parameters + ---------- + indices : array-like + Array of indices that we are to convert. + n : int + Number of elements in the array that we are indexing. + verify : bool, default True + Check that all entries are between 0 and n - 1, inclusive. + + Returns + ------- + array-like + An array-like of positive indices that correspond to the ones + that were passed in initially to this function. + + Raises + ------ + IndexError + One of the converted indices either exceeded the number of, + elements (specified by `n`), or was still negative. + """ + if isinstance(indices, list): + indices = np.array(indices) + if len(indices) == 0: + # If `indices` is empty, np.array will return a float, + # and will cause indexing errors. + return np.empty(0, dtype=np.intp) + + mask = indices < 0 + if mask.any(): + indices = indices.copy() + indices[mask] += n + + if verify: + mask = (indices >= n) | (indices < 0) + if mask.any(): + raise IndexError("indices are out-of-bounds") + return indices + + +# ----------------------------------------------------------- +# Unsorted + + +def length_of_indexer(indexer, target=None) -> int: + """ + Return the expected length of target[indexer] + + Returns + ------- + int + """ + if target is not None and isinstance(indexer, slice): + target_len = len(target) + start = indexer.start + stop = indexer.stop + step = indexer.step + if start is None: + start = 0 + elif start < 0: + start += target_len + if stop is None or stop > target_len: + stop = target_len + elif stop < 0: + stop += target_len + if step is None: + step = 1 + elif step < 0: + start, stop = stop + 1, start + 1 + step = -step + return (stop - start + step - 1) // step + elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)): + if isinstance(indexer, list): + indexer = np.array(indexer) + + if indexer.dtype == bool: + # GH#25774 + return indexer.sum() + return len(indexer) + elif isinstance(indexer, range): + return (indexer.stop - indexer.start) // indexer.step + elif not is_list_like_indexer(indexer): + return 1 + raise AssertionError("cannot find the length of the indexer") + + +def disallow_ndim_indexing(result) -> None: + """ + Helper function to disallow multi-dimensional indexing on 1D Series/Index. + + GH#27125 indexer like idx[:, None] expands dim, but we cannot do that + and keep an index, so we used to return ndarray, which was deprecated + in GH#30588. + """ + if np.ndim(result) > 1: + raise ValueError( + "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer " + "supported. Convert to a numpy array before indexing instead." + ) + + +def unpack_1tuple(tup): + """ + If we have a length-1 tuple/list that contains a slice, unpack to just + the slice. + + Notes + ----- + The list case is deprecated. 
+ """ + if len(tup) == 1 and isinstance(tup[0], slice): + # if we don't have a MultiIndex, we may still be able to handle + # a 1-tuple. see test_1tuple_without_multiindex + + if isinstance(tup, list): + # GH#31299 + raise ValueError( + "Indexing with a single-item list containing a " + "slice is not allowed. Pass a tuple instead.", + ) + + return tup[0] + return tup + + +def check_key_length(columns: Index, key, value: DataFrame) -> None: + """ + Checks if a key used as indexer has the same length as the columns it is + associated with. + + Parameters + ---------- + columns : Index The columns of the DataFrame to index. + key : A list-like of keys to index with. + value : DataFrame The value to set for the keys. + + Raises + ------ + ValueError: If the length of key is not equal to the number of columns in value + or if the number of columns referenced by key is not equal to number + of columns. + """ + if columns.is_unique: + if len(value.columns) != len(key): + raise ValueError("Columns must be same length as key") + else: + # Missing keys in columns are represented as -1 + if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns): + raise ValueError("Columns must be same length as key") + + +def unpack_tuple_and_ellipses(item: tuple): + """ + Possibly unpack arr[..., n] to arr[n] + """ + if len(item) > 1: + # Note: we are assuming this indexing is being done on a 1D arraylike + if item[0] is Ellipsis: + item = item[1:] + elif item[-1] is Ellipsis: + item = item[:-1] + + if len(item) > 1: + raise IndexError("too many indices for array.") + + item = item[0] + return item + + +# ----------------------------------------------------------- +# Public indexer validation + + +def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: + """ + Check if `indexer` is a valid array indexer for `array`. + + For a boolean mask, `array` and `indexer` are checked to have the same + length. The dtype is validated, and if it is an integer or boolean + ExtensionArray, it is checked if there are missing values present, and + it is converted to the appropriate numpy array. Other dtypes will raise + an error. + + Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed + through as is. + + Parameters + ---------- + array : array-like + The array that is being indexed (only used for the length). + indexer : array-like or list-like + The array-like that's used to index. List-like input that is not yet + a numpy array or an ExtensionArray is converted to one. Other input + types are passed through as is. + + Returns + ------- + numpy.ndarray + The validated indexer as a numpy array that can be used to index. + + Raises + ------ + IndexError + When the lengths don't match. + ValueError + When `indexer` cannot be converted to a numpy ndarray to index + (e.g. presence of missing values). + + See Also + -------- + api.types.is_bool_dtype : Check if `key` is of boolean dtype. + + Examples + -------- + When checking a boolean mask, a boolean ndarray is returned when the + arguments are all valid. + + >>> mask = pd.array([True, False]) + >>> arr = pd.array([1, 2]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + An IndexError is raised when the lengths don't match. + + >>> mask = pd.array([True, False, True]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + Traceback (most recent call last): + ... + IndexError: Boolean index has wrong length: 3 instead of 2. + + NA values in a boolean array are treated as False. 
+ + >>> mask = pd.array([True, pd.NA]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + A numpy boolean mask will get passed through (if the length is correct): + + >>> mask = np.array([True, False]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + Similarly for integer indexers, an integer ndarray is returned when it is + a valid indexer, otherwise an error is (for integer indexers, a matching + length is not required): + + >>> indexer = pd.array([0, 2], dtype="Int64") + >>> arr = pd.array([1, 2, 3]) + >>> pd.api.indexers.check_array_indexer(arr, indexer) + array([0, 2]) + + >>> indexer = pd.array([0, pd.NA], dtype="Int64") + >>> pd.api.indexers.check_array_indexer(arr, indexer) + Traceback (most recent call last): + ... + ValueError: Cannot index with an integer indexer containing NA values + + For non-integer/boolean dtypes, an appropriate error is raised: + + >>> indexer = np.array([0., 2.], dtype="float64") + >>> pd.api.indexers.check_array_indexer(arr, indexer) + Traceback (most recent call last): + ... + IndexError: arrays used as indices must be of integer or boolean type + """ + from pandas.core.construction import array as pd_array + + # whatever is not an array-like is returned as-is (possible valid array + # indexers that are not array-like: integer, slice, Ellipsis, None) + # In this context, tuples are not considered as array-like, as they have + # a specific meaning in indexing (multi-dimensional indexing) + if is_list_like(indexer): + if isinstance(indexer, tuple): + return indexer + else: + return indexer + + # convert list-likes to array + if not is_array_like(indexer): + indexer = pd_array(indexer) + if len(indexer) == 0: + # empty list is converted to float array by pd.array + indexer = np.array([], dtype=np.intp) + + dtype = indexer.dtype + if is_bool_dtype(dtype): + if isinstance(dtype, ExtensionDtype): + indexer = indexer.to_numpy(dtype=bool, na_value=False) + else: + indexer = np.asarray(indexer, dtype=bool) + + # GH26658 + if len(indexer) != len(array): + raise IndexError( + f"Boolean index has wrong length: " + f"{len(indexer)} instead of {len(array)}" + ) + elif is_integer_dtype(dtype): + try: + indexer = np.asarray(indexer, dtype=np.intp) + except ValueError as err: + raise ValueError( + "Cannot index with an integer indexer containing NA values" + ) from err + else: + raise IndexError("arrays used as indices must be of integer or boolean type") + + return indexer diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/read_stencil.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/read_stencil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5583a225c5f306394edbf8b4c82ad238acd50935 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/read_stencil.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/sRGB_formats.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/sRGB_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c56f24fdffe5b203c5d5ad9c67690ec908afb74 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/__pycache__/sRGB_formats.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/blend_equation_advanced.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/blend_equation_advanced.py new file mode 
100644 index 0000000000000000000000000000000000000000..82d8f3afccc8a10bcbedfe216cf477e46ae591d0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/blend_equation_advanced.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.blend_equation_advanced + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.blend_equation_advanced to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/blend_equation_advanced.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.blend_equation_advanced import * +from OpenGL.raw.GLES2.NV.blend_equation_advanced import _EXTENSION_NAME + +def glInitBlendEquationAdvancedNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/coverage_sample.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/coverage_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..dd8f254fa6812a3c1e27f0526a8203067c113db5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/coverage_sample.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.coverage_sample + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.coverage_sample to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/coverage_sample.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.coverage_sample import * +from OpenGL.raw.GLES2.NV.coverage_sample import _EXTENSION_NAME + +def glInitCoverageSampleNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/draw_buffers.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/draw_buffers.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b64d62993593104ac9dee0166b4fbb70836ba8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/draw_buffers.py @@ -0,0 +1,26 @@ +'''OpenGL extension NV.draw_buffers + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.draw_buffers to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/draw_buffers.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.draw_buffers import * +from OpenGL.raw.GLES2.NV.draw_buffers import _EXTENSION_NAME + +def glInitDrawBuffersNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + +# INPUT glDrawBuffersNV.bufs size not checked against n +glDrawBuffersNV=wrapper.wrapper(glDrawBuffersNV).setInputArraySize( + 'bufs', None +) +### END AUTOGENERATED SECTION \ No newline at end of file diff --git 
a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/explicit_attrib_location.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/explicit_attrib_location.py new file mode 100644 index 0000000000000000000000000000000000000000..390ac96c7d46ed7a0c10f24f0183ac2b5745c628 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/explicit_attrib_location.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.explicit_attrib_location + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.explicit_attrib_location to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/explicit_attrib_location.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.explicit_attrib_location import * +from OpenGL.raw.GLES2.NV.explicit_attrib_location import _EXTENSION_NAME + +def glInitExplicitAttribLocationNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/framebuffer_multisample.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/framebuffer_multisample.py new file mode 100644 index 0000000000000000000000000000000000000000..e1defb3bc67a56e8676a50dc1afb6ec4dd2f539a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/framebuffer_multisample.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.framebuffer_multisample + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.framebuffer_multisample to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/framebuffer_multisample.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.framebuffer_multisample import * +from OpenGL.raw.GLES2.NV.framebuffer_multisample import _EXTENSION_NAME + +def glInitFramebufferMultisampleNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/generate_mipmap_sRGB.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/generate_mipmap_sRGB.py new file mode 100644 index 0000000000000000000000000000000000000000..2e72dc75d6bb3ec9a32420c901c35c779ae639f3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/generate_mipmap_sRGB.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.generate_mipmap_sRGB + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.generate_mipmap_sRGB to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/generate_mipmap_sRGB.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.generate_mipmap_sRGB import * +from OpenGL.raw.GLES2.NV.generate_mipmap_sRGB import _EXTENSION_NAME + +def glInitGenerateMipmapSrgbNV(): + '''Return boolean indicating whether this extension 
is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/instanced_arrays.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/instanced_arrays.py new file mode 100644 index 0000000000000000000000000000000000000000..7106ab789f1e94d4e50fd56078dc738ac06d23d3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/instanced_arrays.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.instanced_arrays + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.instanced_arrays to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/instanced_arrays.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.instanced_arrays import * +from OpenGL.raw.GLES2.NV.instanced_arrays import _EXTENSION_NAME + +def glInitInstancedArraysNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_buffer.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..0d9ca5350a118a91b7030c630eab1f81108fa878 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_buffer.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.read_buffer + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.read_buffer to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/read_buffer.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.read_buffer import * +from OpenGL.raw.GLES2.NV.read_buffer import _EXTENSION_NAME + +def glInitReadBufferNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_depth_stencil.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_depth_stencil.py new file mode 100644 index 0000000000000000000000000000000000000000..db42f4d19a36856b0faaef0736a0fa465b65f84d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_depth_stencil.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.read_depth_stencil + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.read_depth_stencil to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/read_depth_stencil.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.read_depth_stencil import * +from OpenGL.raw.GLES2.NV.read_depth_stencil import _EXTENSION_NAME + +def glInitReadDepthStencilNV(): + '''Return boolean indicating whether this extension is available''' + 
from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_stencil.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_stencil.py new file mode 100644 index 0000000000000000000000000000000000000000..b2af467f75669b90a35b3e4a5afd5b7f86adb213 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/read_stencil.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.read_stencil + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.read_stencil to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/read_stencil.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.read_stencil import * +from OpenGL.raw.GLES2.NV.read_stencil import _EXTENSION_NAME + +def glInitReadStencilNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/sRGB_formats.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/sRGB_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd5281ba99c6bfde416d2bce5d6f2d65fafa771 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/sRGB_formats.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.sRGB_formats + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.sRGB_formats to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/sRGB_formats.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.sRGB_formats import * +from OpenGL.raw.GLES2.NV.sRGB_formats import _EXTENSION_NAME + +def glInitSrgbFormatsNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/shadow_samplers_array.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/shadow_samplers_array.py new file mode 100644 index 0000000000000000000000000000000000000000..b92a073456302479ef99da3e03fa36e10fe0d33c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/shadow_samplers_array.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.shadow_samplers_array + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.shadow_samplers_array to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/shadow_samplers_array.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.shadow_samplers_array import * +from OpenGL.raw.GLES2.NV.shadow_samplers_array import _EXTENSION_NAME + +def glInitShadowSamplersArrayNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import 
extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/shadow_samplers_cube.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/shadow_samplers_cube.py new file mode 100644 index 0000000000000000000000000000000000000000..51069f4e7017a01619c3a2310f16d34e77184dd7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/shadow_samplers_cube.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.shadow_samplers_cube + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.shadow_samplers_cube to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/shadow_samplers_cube.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.shadow_samplers_cube import * +from OpenGL.raw.GLES2.NV.shadow_samplers_cube import _EXTENSION_NAME + +def glInitShadowSamplersCubeNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/texture_compression_s3tc_update.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/texture_compression_s3tc_update.py new file mode 100644 index 0000000000000000000000000000000000000000..2ba6ae7fb0e37d2f66fec43c322d4636ecaf2acd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/texture_compression_s3tc_update.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.texture_compression_s3tc_update + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.texture_compression_s3tc_update to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/texture_compression_s3tc_update.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from OpenGL.raw.GLES2.NV.texture_compression_s3tc_update import * +from OpenGL.raw.GLES2.NV.texture_compression_s3tc_update import _EXTENSION_NAME + +def glInitTextureCompressionS3TcUpdateNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/texture_npot_2D_mipmap.py b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/texture_npot_2D_mipmap.py new file mode 100644 index 0000000000000000000000000000000000000000..af5192323d99fc16cd48249a0e28005243f29441 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/NV/texture_npot_2D_mipmap.py @@ -0,0 +1,23 @@ +'''OpenGL extension NV.texture_npot_2D_mipmap + +This module customises the behaviour of the +OpenGL.raw.GLES2.NV.texture_npot_2D_mipmap to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/NV/texture_npot_2D_mipmap.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.GLES2 import _types, _glgets +from 
OpenGL.raw.GLES2.NV.texture_npot_2D_mipmap import * +from OpenGL.raw.GLES2.NV.texture_npot_2D_mipmap import _EXTENSION_NAME + +def glInitTextureNpot2DMipmapNV(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/EGL_image.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/EGL_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad6216ee1f07e3a2f6992739dc38cb4f7716d5db Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/EGL_image.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/EGL_image_external.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/EGL_image_external.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..161e9f20c2e6c25e5f1192a8f9b25e8636075434 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/EGL_image_external.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/compressed_paletted_texture.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/compressed_paletted_texture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94feefe7dec2e9d890a2e139993843fffcf089f0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/compressed_paletted_texture.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/depth32.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/depth32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0acb49225e94078ba1e2f37efa9491014d81c05f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/depth32.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/depth_texture.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/depth_texture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdc5310e7af0f886483c791de01aad9b4e53aaac Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/depth_texture.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/element_index_uint.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/element_index_uint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9db4d656dd3a250e4c47d863997ab3dce1ee983f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/element_index_uint.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/fragment_precision_high.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/fragment_precision_high.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3988ed6903f41f9f1c5ae9d0b02a4f94f5fbff66 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/fragment_precision_high.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/get_program_binary.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/get_program_binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f69e93a6d812d540ae80f4093bea95a59c99040 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/get_program_binary.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/mapbuffer.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/mapbuffer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5ec8fec8d69f57ca63a0e8a16183b45837baadc Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/mapbuffer.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/shader_multisample_interpolation.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/shader_multisample_interpolation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..334c5bd26b44c2c780d956328259d5c8e7b892bb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/shader_multisample_interpolation.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/standard_derivatives.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/standard_derivatives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44cd5b760adbc3a81d814f7672669006cdcf02aa Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/standard_derivatives.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/stencil1.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/stencil1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f9b182443a7152e45b1a213ff89177842740aab Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/stencil1.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/texture_compression_astc.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/texture_compression_astc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fbd5dc3e537e3715a88d94231e945a1d2bdf776 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/texture_compression_astc.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/texture_stencil8.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/texture_stencil8.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac5f9595916df94b4a2ea541e9f212d0d2bf1137 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/OES/__pycache__/texture_stencil8.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46bb10363918eb31aff92270efbe4ecf82f49202 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/GLES2/__pycache__/shaders.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/__pycache__/shaders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3807ce7f75bb231cfab485437da9fd1e6f116c57 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/GLES2/__pycache__/shaders.cpython-310.pyc differ
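A minimal usage sketch of the rolling-window indexer API vendored in pandas/core/indexers/objects.py above. FixedForwardWindowIndexer and the BaseIndexer subclassing pattern (a get_window_bounds method returning int64 start/end bounds) are taken from the docstrings in that file; the SimpleForwardIndexer class name below is illustrative only and not part of the vendored package.

import numpy as np
import pandas as pd
from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer

df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})

# Forward-looking windows of size 2, per the FixedForwardWindowIndexer docstring.
indexer = FixedForwardWindowIndexer(window_size=2)
print(df.rolling(window=indexer, min_periods=1).sum())

# A custom indexer only needs get_window_bounds returning int64 start/end
# arrays; this sketch reproduces the same forward-looking behaviour.
class SimpleForwardIndexer(BaseIndexer):  # illustrative name, not in pandas
    def get_window_bounds(self, num_values=0, min_periods=None,
                          center=None, closed=None, step=None):
        start = np.arange(num_values, dtype=np.int64)
        end = np.minimum(start + self.window_size, num_values).astype(np.int64)
        return start, end

print(df.rolling(SimpleForwardIndexer(window_size=2), min_periods=1).sum())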