from __future__ import annotations

from datetime import datetime
import functools
from itertools import zip_longest
import operator
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Hashable,
Iterable,
Literal,
NoReturn,
Sequence,
TypeVar,
cast,
final,
overload,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
NaT,
algos as libalgos,
index as libindex,
lib,
)
from pandas._libs.internals import BlockValuesRefs
import pandas._libs.join as libjoin
from pandas._libs.lib import (
is_datetime_array,
no_default,
)
from pandas._libs.missing import is_float_nan
from pandas._libs.tslibs import (
IncompatibleFrequency,
OutOfBoundsDatetime,
Timestamp,
tz_compare,
)
from pandas._typing import (
AnyAll,
ArrayLike,
Axes,
Axis,
DropKeep,
DtypeObj,
F,
IgnoreRaise,
IndexLabel,
JoinHow,
Level,
Shape,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
DuplicateLabelError,
InvalidIndexError,
)
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.util._exceptions import (
find_stack_level,
rewrite_exception,
)
from pandas.core.dtypes.astype import (
astype_array,
astype_is_view,
)
from pandas.core.dtypes.cast import (
LossySetitemError,
can_hold_element,
common_dtype_categorical_compat,
find_result_type,
infer_dtype_from,
maybe_cast_pointwise_result,
np_can_hold_element,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_any_real_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_dtype_equal,
is_ea_or_datetimelike_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_iterator,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_signed_integer_dtype,
is_string_dtype,
needs_i8_conversion,
pandas_dtype,
validate_all_hashable,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCMultiIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.inference import is_dict_like
from pandas.core.dtypes.missing import (
array_equivalent,
is_valid_na_for_dtype,
isna,
)
from pandas.core import (
arraylike,
ops,
)
from pandas.core.accessor import CachedAccessor
import pandas.core.algorithms as algos
from pandas.core.array_algos.putmask import (
setitem_datetimelike_compat,
validate_putmask,
)
from pandas.core.arrays import (
ArrowExtensionArray,
BaseMaskedArray,
Categorical,
ExtensionArray,
)
from pandas.core.arrays.string_ import StringArray
from pandas.core.base import (
IndexOpsMixin,
PandasObject,
)
import pandas.core.common as com
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
sanitize_array,
)
from pandas.core.indexers import disallow_ndim_indexing
from pandas.core.indexes.frozen import FrozenList
from pandas.core.missing import clean_reindex_fill_method
from pandas.core.ops import get_op_result_name
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
ensure_key_mapped,
get_group_index_sorter,
nargsort,
)
from pandas.core.strings.accessor import StringMethods
from pandas.io.formats.printing import (
PrettyDict,
default_pprint,
format_object_summary,
pprint_thing,
)
__all__ = ["Index"]

_index_shared_docs: dict[str, str] = {}
str_t = str

_dtype_obj = np.dtype("object")

_IndexT = TypeVar("_IndexT", bound="Index")


class Index(IndexOpsMixin, PandasObject):
"""
Immutable sequence used for indexing and alignment.
The basic object storing axis labels for all pandas objects.
.. versionchanged:: 2.0.0
Index can hold all numpy numeric dtypes (except float16). Previously only
int64/uint64/float64 dtypes were accepted.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
See Also
--------
RangeIndex : Index implementing a monotonic integer range.
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical Index.
IntervalIndex : An Index of :class:`Interval` s.
DatetimeIndex : Index of datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
Notes
-----
An Index instance can **only** contain hashable objects.
An Index instance *can not* hold numpy float16 dtype.
Examples
--------
>>> pd.Index([1, 2, 3])
Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
>>> pd.Index([1, 2, 3], dtype="uint8")
Index([1, 2, 3], dtype='uint8')
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods; see github.com/cython/cython/issues/2647
# for why we need to wrap these instead of making them class attributes
# Moreover, cython will choose the appropriate-dtyped sub-function
# given the dtypes of the passed arguments
def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
# can_use_libjoin assures sv and ov are ndarrays
sv = cast(np.ndarray, sv)
ov = cast(np.ndarray, ov)
# similar but not identical to ov.searchsorted(sv)
return libjoin.left_join_indexer_unique(sv, ov)
def _left_indexer(
self: _IndexT, other: _IndexT
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
# can_use_libjoin assures sv and ov are ndarrays
sv = cast(np.ndarray, sv)
ov = cast(np.ndarray, ov)
joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
def _inner_indexer(
self: _IndexT, other: _IndexT
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
# can_use_libjoin assures sv and ov are ndarrays
sv = cast(np.ndarray, sv)
ov = cast(np.ndarray, ov)
joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
def _outer_indexer(
self: _IndexT, other: _IndexT
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
# can_use_libjoin assures sv and ov are ndarrays
sv = cast(np.ndarray, sv)
ov = cast(np.ndarray, ov)
joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
_typ: str = "index"
_data: ExtensionArray | np.ndarray
_data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = (
np.ndarray,
ExtensionArray,
)
_id: object | None = None
_name: Hashable = None
# MultiIndex.levels previously allowed setting the index name. We
# don't allow this anymore, and raise if it happens rather than
# failing silently.
_no_setting_name: bool = False
_comparables: list[str] = ["name"]
_attributes: list[str] = ["name"]
    @property
    def _can_hold_strings(self) -> bool:
return not is_numeric_dtype(self)
_engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = {
np.dtype(np.int8): libindex.Int8Engine,
np.dtype(np.int16): libindex.Int16Engine,
np.dtype(np.int32): libindex.Int32Engine,
np.dtype(np.int64): libindex.Int64Engine,
np.dtype(np.uint8): libindex.UInt8Engine,
np.dtype(np.uint16): libindex.UInt16Engine,
np.dtype(np.uint32): libindex.UInt32Engine,
np.dtype(np.uint64): libindex.UInt64Engine,
np.dtype(np.float32): libindex.Float32Engine,
np.dtype(np.float64): libindex.Float64Engine,
np.dtype(np.complex64): libindex.Complex64Engine,
np.dtype(np.complex128): libindex.Complex128Engine,
}
    @property
    def _engine_type(
self,
) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]:
return self._engine_types.get(self.dtype, libindex.ObjectEngine)
# whether we support partial string indexing. Overridden
# in DatetimeIndex and PeriodIndex
_supports_partial_string_indexing = False
_accessors = {"str"}
str = CachedAccessor("str", StringMethods)
_references = None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
dtype=None,
copy: bool = False,
name=None,
tupleize_cols: bool = True,
) -> Index:
from pandas.core.indexes.range import RangeIndex
name = maybe_extract_name(name, data, cls)
if dtype is not None:
dtype = pandas_dtype(dtype)
data_dtype = getattr(data, "dtype", None)
refs = None
if not copy and isinstance(data, (ABCSeries, Index)):
refs = data._references
# range
if isinstance(data, (range, RangeIndex)):
result = RangeIndex(start=data, copy=copy, name=name)
if dtype is not None:
return result.astype(dtype, copy=False)
return result
elif is_ea_or_datetimelike_dtype(dtype):
# non-EA dtype indexes have special casting logic, so we punt here
pass
elif is_ea_or_datetimelike_dtype(data_dtype):
pass
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if isinstance(data, ABCMultiIndex):
data = data._values
if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]:
# GH#11836 we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
elif is_scalar(data):
raise cls._raise_scalar_data_error(data)
elif hasattr(data, "__array__"):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name)
elif not is_list_like(data) and not isinstance(data, memoryview):
# 2022-11-16 the memoryview check is only necessary on some CI
# builds, not clear why
raise cls._raise_scalar_data_error(data)
else:
if tupleize_cols:
# GH21470: convert iterable to list before determining if empty
if is_iterator(data):
data = list(data)
if data and all(isinstance(e, tuple) for e in data):
# we must be all tuples, otherwise don't construct
# 10697
from pandas.core.indexes.multi import MultiIndex
return MultiIndex.from_tuples(data, names=name)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
# we allow set/frozenset, which Series/sanitize_array does not, so
# cast to list here
data = list(data)
if len(data) == 0:
# unlike Series, we default to object dtype:
data = np.array(data, dtype=object)
if len(data) and isinstance(data[0], tuple):
# Ensure we get 1-D array of tuples instead of 2D array.
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
try:
arr = sanitize_array(data, None, dtype=dtype, copy=copy)
except ValueError as err:
if "index must be specified when data is not list-like" in str(err):
raise cls._raise_scalar_data_error(data) from err
if "Data must be 1-dimensional" in str(err):
raise ValueError("Index data must be 1-dimensional") from err
raise
arr = ensure_wrapped_if_datetimelike(arr)
klass = cls._dtype_to_subclass(arr.dtype)
arr = klass._ensure_array(arr, arr.dtype, copy=False)
return klass._simple_new(arr, name, refs=refs)
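    # A rough sketch of the dispatch in ``__new__`` above (not part of the original
    # module; assumes ``import pandas as pd``, reprs from pandas 2.x):
    #
    #   >>> pd.Index(range(3))                    # range input takes the RangeIndex path
    #   RangeIndex(start=0, stop=3, step=1)
    #   >>> type(pd.Index([("a", 1), ("b", 2)])).__name__   # all-tuple data -> MultiIndex
    #   'MultiIndex'
    #   >>> pd.Index(["2020-01-01"], dtype="datetime64[ns]")  # datetimelike dtype -> DatetimeIndex
    #   DatetimeIndex(['2020-01-01'], dtype='datetime64[ns]', freq=None)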
    @classmethod
    def _ensure_array(cls, data, dtype, copy: bool):
"""
Ensure we have a valid array to pass to _simple_new.
"""
if data.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
elif dtype == np.float16:
# float16 not supported (no indexing engine)
raise NotImplementedError("float16 indexes are not supported")
if copy:
# asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
data = data.copy()
return data
    @classmethod
    def _dtype_to_subclass(cls, dtype: DtypeObj):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
if isinstance(dtype, ExtensionDtype):
if isinstance(dtype, DatetimeTZDtype):
from pandas import DatetimeIndex
return DatetimeIndex
elif isinstance(dtype, CategoricalDtype):
from pandas import CategoricalIndex
return CategoricalIndex
elif isinstance(dtype, IntervalDtype):
from pandas import IntervalIndex
return IntervalIndex
elif isinstance(dtype, PeriodDtype):
from pandas import PeriodIndex
return PeriodIndex
return Index
if dtype.kind == "M":
from pandas import DatetimeIndex
return DatetimeIndex
elif dtype.kind == "m":
from pandas import TimedeltaIndex
return TimedeltaIndex
elif dtype.kind == "O":
# NB: assuming away MultiIndex
return Index
elif issubclass(dtype.type, str) or is_numeric_dtype(dtype):
return Index
raise NotImplementedError(dtype)
# NOTE for new Index creation:
# - _simple_new: It returns new Index with the same type as the caller.
# All metadata (such as name) must be provided by caller's responsibility.
# Using _shallow_copy is recommended because it fills these metadata
# otherwise specified.
# - _shallow_copy: It returns new Index with the same type (using
# _simple_new), but fills caller's metadata otherwise specified. Passed
# kwargs will overwrite corresponding metadata.
# See each method's docstring.
    @classmethod
    def _simple_new(
cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None
) -> _IndexT:
"""
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
"""
assert isinstance(values, cls._data_cls), type(values)
result = object.__new__(cls)
result._data = values
result._name = name
result._cache = {}
result._reset_identity()
if refs is not None:
result._references = refs
else:
result._references = BlockValuesRefs()
result._references.add_index_reference(result)
return result
    @classmethod
    def _with_infer(cls, *args, **kwargs):
"""
Constructor that uses the 1.0.x behavior inferring numeric dtypes
for ndarray[object] inputs.
"""
result = cls(*args, **kwargs)
if result.dtype == _dtype_obj and not result._is_multi:
# error: Argument 1 to "maybe_convert_objects" has incompatible type
# "Union[ExtensionArray, ndarray[Any, Any]]"; expected
# "ndarray[Any, Any]"
values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type]
if values.dtype.kind in ["i", "u", "f", "b"]:
return Index(values, name=result.name)
return result
    @property
    def _constructor(self: _IndexT) -> type[_IndexT]:
return type(self)
def _maybe_check_unique(self) -> None:
"""
Check that an Index has no duplicates.
This is typically only called via
`NDFrame.flags.allows_duplicate_labels.setter` when it's set to
True (duplicates aren't allowed).
Raises
------
DuplicateLabelError
When the index is not unique.
"""
if not self.is_unique:
msg = """Index has duplicates."""
duplicates = self._format_duplicate_message()
msg += f"\n{duplicates}"
raise DuplicateLabelError(msg)
def _format_duplicate_message(self) -> DataFrame:
"""
Construct the DataFrame for a DuplicateLabelError.
This returns a DataFrame indicating the labels and positions
of duplicates in an index. This should only be called when it's
already known that duplicates are present.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'a'])
>>> idx._format_duplicate_message()
positions
label
a [0, 2]
"""
from pandas import Series
duplicates = self[self.duplicated(keep="first")].unique()
assert len(duplicates)
out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates]
if self._is_multi:
# test_format_duplicate_labels_message_multi
# error: "Type[Index]" has no attribute "from_tuples" [attr-defined]
out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined]
if self.nlevels == 1:
out = out.rename_axis("label")
return out.to_frame(name="positions")
# --------------------------------------------------------------------
# Index Internals Methods
def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT:
"""
Create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
name : Label, defaults to self.name
"""
name = self._name if name is no_default else name
return self._simple_new(values, name=name, refs=self._references)
def _view(self: _IndexT) -> _IndexT:
"""
fastpath to make a shallow copy, i.e. new object with same data.
"""
result = self._simple_new(self._values, name=self._name, refs=self._references)
result._cache = self._cache
return result
def _rename(self: _IndexT, name: Hashable) -> _IndexT:
"""
fastpath for rename if new name is already validated.
"""
result = self._view()
result._name = name
return result
def is_(self, other) -> bool:
"""
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
Other object to compare against.
Returns
-------
bool
True if both have same underlying data, False otherwise.
See Also
--------
Index.identical : Works like ``Index.is_`` but also checks metadata.
"""
if self is other:
return True
elif not hasattr(other, "_id"):
return False
elif self._id is None or other._id is None:
return False
else:
return self._id is other._id
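    # Illustration of the identity check above (a sketch, not part of the original
    # module): views share ``_id``, while copies get a fresh one.
    #
    #   >>> idx = pd.Index(['a', 'b'])
    #   >>> idx.is_(idx.view())
    #   True
    #   >>> idx.is_(idx.copy())
    #   False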
def _reset_identity(self) -> None:
"""
Initializes or resets ``_id`` attribute with new object.
"""
self._id = object()
def _cleanup(self) -> None:
self._engine.clear_mapping()
    @cache_readonly
    def _engine(
self,
) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine:
# For base class (object dtype) we get ObjectEngine
target_values = self._get_engine_target()
if isinstance(target_values, ExtensionArray):
if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)):
try:
return _masked_engines[target_values.dtype.name](target_values)
except KeyError:
# Not supported yet e.g. decimal
pass
elif self._engine_type is libindex.ObjectEngine:
return libindex.ExtensionEngine(target_values)
target_values = cast(np.ndarray, target_values)
# to avoid a reference cycle, bind `target_values` to a local variable, so
# `self` is not passed into the lambda.
if target_values.dtype == bool:
return libindex.BoolEngine(target_values)
elif target_values.dtype == np.complex64:
return libindex.Complex64Engine(target_values)
elif target_values.dtype == np.complex128:
return libindex.Complex128Engine(target_values)
elif needs_i8_conversion(self.dtype):
# We need to keep M8/m8 dtype when initializing the Engine,
# but don't want to change _get_engine_target bc it is used
# elsewhere
# error: Item "ExtensionArray" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr]
target_values = self._data._ndarray # type: ignore[union-attr]
# error: Argument 1 to "ExtensionEngine" has incompatible type
# "ndarray[Any, Any]"; expected "ExtensionArray"
return self._engine_type(target_values) # type: ignore[arg-type]
def _dir_additions_for_owner(self) -> set[str_t]:
"""
Add the string-like labels to the owner dataframe/series dir output.
If this is a MultiIndex, it's first level values are used.
"""
return {
c
for c in self.unique(level=0)[: get_option("display.max_dir_items")]
if isinstance(c, str) and c.isidentifier()
}
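    # Sketch of what the filter above keeps (assumes ``import pandas as pd``):
    # only string labels that are valid Python identifiers survive.
    #
    #   >>> pd.Index(['foo', 'bar baz', 1])._dir_additions_for_owner()
    #   {'foo'}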
# --------------------------------------------------------------------
# Array-Like Methods
# ndarray compat
def __len__(self) -> int:
"""
Return the length of the Index.
"""
return len(self._data)
def __array__(self, dtype=None) -> np.ndarray:
"""
The array interface, return my values.
"""
return np.asarray(self._data, dtype=dtype)
def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs):
if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs):
return NotImplemented
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if "out" in kwargs:
# e.g. test_dti_isub_tdi
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
if method == "reduce":
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
new_inputs = [x if x is not self else x._values for x in inputs]
result = getattr(ufunc, method)(*new_inputs, **kwargs)
if ufunc.nout == 2:
# i.e. np.divmod, np.modf, np.frexp
return tuple(self.__array_wrap__(x) for x in result)
if result.dtype == np.float16:
result = result.astype(np.float32)
return self.__array_wrap__(result)
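    # Minimal ufunc example for the dispatch above (a sketch; assumes numpy and
    # pandas imported as in this module, output from pandas 2.x):
    #
    #   >>> np.sqrt(pd.Index([1.0, 4.0]))
    #   Index([1.0, 2.0], dtype='float64')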
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc and other functions e.g. np.split.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1:
return result
return Index(result, name=self.name)
    @property
    def dtype(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
def ravel(self, order: str_t = "C") -> Index:
"""
Return a view on self.
Returns
-------
Index
See Also
--------
numpy.ndarray.ravel : Return a flattened array.
"""
return self[:]
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, "_typ"):
dtype = cls
if isinstance(cls, str):
dtype = pandas_dtype(cls)
if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion(
dtype
):
if dtype.kind == "m" and dtype != "m8[ns]":
# e.g. m8[s]
return self._data.view(cls)
idx_cls = self._dtype_to_subclass(dtype)
# NB: we only get here for subclasses that override
# _data_cls such that it is a type and not a tuple
# of types.
arr_cls = idx_cls._data_cls
arr = arr_cls(self._data.view("i8"), dtype=dtype)
return idx_cls._simple_new(arr, name=self.name, refs=self._references)
result = self._data.view(cls)
else:
result = self._view()
if isinstance(result, Index):
result._id = self._id
return result
def astype(self, dtype, copy: bool = True):
"""
Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
impossible, a TypeError exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
Note that any signed integer `dtype` is treated as ``'int64'``,
and any unsigned integer `dtype` is treated as ``'uint64'``,
regardless of the size.
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if is_dtype_equal(self.dtype, dtype):
# Ensure that self.astype(self.dtype) is self
return self.copy() if copy else self
values = self._data
if isinstance(values, ExtensionArray):
with rewrite_exception(type(values).__name__, type(self).__name__):
new_values = values.astype(dtype, copy=copy)
elif isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
# Note: for RangeIndex and CategoricalDtype self vs self._values
# behaves differently here.
new_values = cls._from_sequence(self, dtype=dtype, copy=copy)
else:
# GH#13149 specifically use astype_array instead of astype
new_values = astype_array(values, dtype=dtype, copy=copy)
# pass copy=False because any copying will be done in the astype above
result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False)
if (
not copy
and self._references is not None
and astype_is_view(self.dtype, dtype)
):
result._references = self._references
result._references.add_index_reference(result)
return result
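    # Usage sketch for ``astype`` (not part of the original module; reprs from
    # pandas 2.x):
    #
    #   >>> idx = pd.Index([1, 2, 3])
    #   >>> idx.astype("float64")
    #   Index([1.0, 2.0, 3.0], dtype='float64')
    #   >>> idx.astype("category")
    #   CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')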
_index_shared_docs[
"take"
] = """
Return a new %(klass)s of the values selected by the indices.
For internal compatibility with numpy arrays.
Parameters
----------
indices : array-like
Indices to be taken.
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If Index doesn't hold NA, raise ValueError.
Returns
-------
Index
An index formed of elements at the given indices. Will be the same
type as self, except for RangeIndex.
See Also
--------
numpy.ndarray.take: Return an array formed from the
elements of a at the given indices.
"""
def take(
self,
indices,
axis: Axis = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
):
if kwargs:
nv.validate_take((), kwargs)
if is_scalar(indices):
raise TypeError("Expected indices to be array-like")
indices = ensure_platform_int(indices)
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
# Note: we discard fill_value and use self._na_value, only relevant
# in the case where allow_fill is True and fill_value is not None
values = self._values
if isinstance(values, np.ndarray):
taken = algos.take(
values, indices, allow_fill=allow_fill, fill_value=self._na_value
)
else:
# algos.take passes 'axis' keyword which not all EAs accept
taken = values.take(
indices, allow_fill=allow_fill, fill_value=self._na_value
)
# _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(taken, name=self.name)
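    # Usage sketch for ``take`` (a sketch, assuming pandas 2.x defaults): with the
    # default ``fill_value=None``, ``-1`` is treated positionally (last element).
    #
    #   >>> idx = pd.Index(['a', 'b', 'c'])
    #   >>> idx.take([2, 0, 1])
    #   Index(['c', 'a', 'b'], dtype='object')
    #   >>> idx.take([1, -1])
    #   Index(['b', 'c'], dtype='object')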
def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:
"""
We only use pandas-style take when allow_fill is True _and_
fill_value is not None.
"""
if allow_fill and fill_value is not None:
# only fill if we are passing a non-None fill_value
if self._can_hold_na:
if (indices < -1).any():
raise ValueError(
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
else:
cls_name = type(self).__name__
raise ValueError(
f"Unable to fill values because {cls_name} cannot contain NA"
)
else:
allow_fill = False
return allow_fill
_index_shared_docs[
"repeat"
] = """
Repeat elements of a %(klass)s.
Returns a new %(klass)s where each element of the current %(klass)s
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
%(klass)s.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
%(klass)s
Newly created %(klass)s with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.repeat(2)
Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')
>>> idx.repeat([1, 2, 3])
Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')
"""
def repeat(self, repeats, axis=None):
repeats = ensure_platform_int(repeats)
nv.validate_repeat((), {"axis": axis})
res_values = self._values.repeat(repeats)
# _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(res_values, name=self.name)
# --------------------------------------------------------------------
# Copying Methods
def copy(
self: _IndexT,
name: Hashable | None = None,
deep: bool = False,
) -> _IndexT:
"""
Make a copy of this object.
Name is set on the new object.
Parameters
----------
name : Label, optional
Set name for new object.
deep : bool, default False
Returns
-------
Index
Index refer to new object which is a copy of this object.
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
name = self._validate_names(name=name, deep=deep)[0]
if deep:
new_data = self._data.copy()
new_index = type(self)._simple_new(new_data, name=name)
else:
new_index = self._rename(name=name)
return new_index
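    # Usage sketch for ``copy`` (not part of the original module):
    #
    #   >>> pd.Index(['a', 'b', 'c']).copy(name="letters")
    #   Index(['a', 'b', 'c'], dtype='object', name='letters')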
def __copy__(self: _IndexT, **kwargs) -> _IndexT:
return self.copy(**kwargs)
def __deepcopy__(self: _IndexT, memo=None) -> _IndexT:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
# --------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str_t:
"""
Return a string representation for this object.
"""
klass_name = type(self).__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
attrs_str = [f"{k}={v}" for k, v in attrs]
prepr = f",{space}".join(attrs_str)
# no data provided, just attributes
if data is None:
data = ""
return f"{klass_name}({data}{prepr})"
def _format_space(self) -> str_t:
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
    @property
    def _formatter_func(self):
"""
Return the formatter function.
"""
return default_pprint
def _format_data(self, name=None) -> str_t:
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = True
if self.inferred_type == "string":
is_justify = False
elif self.inferred_type == "categorical":
self = cast("CategoricalIndex", self)
if is_object_dtype(self.categories):
is_justify = False
return format_object_summary(
self,
self._formatter_func,
is_justify=is_justify,
name=name,
line_break_each_value=self._is_multi,
)
def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]:
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs: list[tuple[str_t, str_t | int | bool | None]] = []
if not self._is_multi:
attrs.append(("dtype", f"'{self.dtype}'"))
if self.name is not None:
attrs.append(("name", default_pprint(self.name)))
elif self._is_multi and any(x is not None for x in self.names):
attrs.append(("names", default_pprint(self.names)))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))
return attrs
def _get_level_names(self) -> Hashable | Sequence[Hashable]:
"""
Return a name or list of names with None replaced by the level number.
"""
if self._is_multi:
return [
level if name is None else name for level, name in enumerate(self.names)
]
else:
return 0 if self.name is None else self.name
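    # Sketch of the fallback above (assumes ``import pandas as pd``): a missing
    # name is replaced by the level number.
    #
    #   >>> pd.Index(['a', 'b'], name='letters')._get_level_names()
    #   'letters'
    #   >>> pd.Index(['a', 'b'])._get_level_names()
    #   0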
def _mpl_repr(self) -> np.ndarray:
# how to represent ourselves to matplotlib
if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M":
return cast(np.ndarray, self.values)
return self.astype(object, copy=False)._values
def format(
self,
name: bool = False,
formatter: Callable | None = None,
na_rep: str_t = "NaN",
) -> list[str_t]:
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep)
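    # Usage sketch for ``format`` (outputs observed with pandas 2.0; not part of
    # the original module):
    #
    #   >>> pd.Index(['a', np.nan, 'c']).format()
    #   ['a', 'NaN', 'c']
    #   >>> pd.Index(['a', 'b'], name='x').format(name=True)
    #   ['x', 'a', 'b']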
def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]:
from pandas.io.formats.format import format_array
values = self._values
if is_object_dtype(values.dtype):
values = cast(np.ndarray, values)
values = lib.maybe_convert_objects(values, safe=True)
result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values]
# could have nans
mask = is_float_nan(values)
if mask.any():
result_arr = np.array(result)
result_arr[mask] = na_rep
result = result_arr.tolist()
else:
result = trim_front(format_array(values, None, justify="left"))
return header + result
def _format_native_types(
self,
*,
na_rep: str_t = "",
decimal: str_t = ".",
float_format=None,
date_format=None,
quoting=None,
) -> npt.NDArray[np.object_]:
"""
Actually format specific types of the index.
"""
from pandas.io.formats.format import FloatArrayFormatter
if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype):
formatter = FloatArrayFormatter(
self._values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
return formatter.get_result_as_array()
mask = isna(self)
if not is_object_dtype(self) and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def _summary(self, name=None) -> str_t:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if hasattr(head, "format") and not isinstance(head, str):
head = head.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
head = self._formatter_func(head).replace("'", "")
tail = self[-1]
if hasattr(tail, "format") and not isinstance(tail, str):
tail = tail.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
tail = self._formatter_func(tail).replace("'", "")
index_summary = f", {head} to {tail}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
return f"{name}: {len(self)} entries{index_summary}"
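    # Sketch of the summary line built above (assumes ``import pandas as pd``):
    #
    #   >>> pd.Index([1, 2, 3])._summary()
    #   'Index: 3 entries, 1 to 3'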
# --------------------------------------------------------------------
# Conversion Methods
def to_flat_index(self: _IndexT) -> _IndexT:
"""
Identity method.
This is implemented for compatibility with subclass implementations
when chaining.
Returns
-------
pd.Index
Caller.
See Also
--------
MultiIndex.to_flat_index : Subclass implementation.
"""
return self
def to_series(self, index=None, name: Hashable = None) -> Series:
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Name of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
See Also
--------
Index.to_frame : Convert an Index to a DataFrame.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
By default, the original Index and original name is reused.
>>> idx.to_series()
animal
Ant Ant
Bear Bear
Cow Cow
Name: animal, dtype: object
To enforce a new Index, specify new labels to ``index``:
>>> idx.to_series(index=[0, 1, 2])
0 Ant
1 Bear
2 Cow
Name: animal, dtype: object
To override the name of the resulting column, specify `name`:
>>> idx.to_series(name='zoo')
animal
Ant Ant
Bear Bear
Cow Cow
Name: zoo, dtype: object
"""
from pandas import Series
if index is None:
index = self._view()
if name is None:
name = self.name
return Series(self._values.copy(), index=index, name=name)
def to_frame(
self, index: bool = True, name: Hashable = lib.no_default
) -> DataFrame:
"""
Create a DataFrame with a column containing the Index.
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original Index.
name : object, defaults to index.name
The passed name should substitute for the index name (if it has
one).
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(index=False, name='zoo')
zoo
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
if name is lib.no_default:
name = self._get_level_names()
result = DataFrame({name: self._values.copy()})
if index:
result.index = self
return result
# --------------------------------------------------------------------
# Name-Centric Methods
    @property
    def name(self) -> Hashable:
"""
Return Index or MultiIndex name.
"""
return self._name
    @name.setter
    def name(self, value: Hashable) -> None:
if self._no_setting_name:
# Used in MultiIndex.levels to avoid silently ignoring name updates.
raise RuntimeError(
"Cannot set name on a level of a MultiIndex. Use "
"'MultiIndex.set_names' instead."
)
maybe_extract_name(value, None, type(self))
self._name = value
def _validate_names(
self, name=None, names=None, deep: bool = False
) -> list[Hashable]:
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if names is None and name is None:
new_names = deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
new_names = names
elif not is_list_like(name):
new_names = [name]
else:
new_names = name
if len(new_names) != len(self.names):
raise ValueError(
f"Length of new names must be {len(self.names)}, got {len(new_names)}"
)
# All items in 'new_names' need to be hashable
validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name")
return new_names
def _get_default_index_names(
self, names: Hashable | Sequence[Hashable] | None = None, default=None
) -> list[Hashable]:
"""
Get names of index.
Parameters
----------
names : int, str or 1-dimensional list, default None
Index names to set.
default : str
Default name of index.
Raises
------
TypeError
if names not str or list-like
"""
from pandas.core.indexes.multi import MultiIndex
if names is not None:
if isinstance(names, (int, str)):
names = [names]
if not isinstance(names, list) and names is not None:
raise ValueError("Index names must be str or 1-dimensional list")
if not names:
if isinstance(self, MultiIndex):
names = com.fill_missing_names(self.names)
else:
names = [default] if self.name is None else [self.name]
return names
def _get_names(self) -> FrozenList:
return FrozenList((self.name,))
def _set_names(self, values, *, level=None) -> None:
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError("Names must be a list-like")
if len(values) != 1:
raise ValueError(f"Length of new names must be 1, got {len(values)}")
# GH 20527
# All items in 'name' need to be hashable:
validate_all_hashable(*values, error_name=f"{type(self).__name__}.name")
self._name = values[0]
names = property(fset=_set_names, fget=_get_names)
    @overload
    def set_names(
self: _IndexT, names, *, level=..., inplace: Literal[False] = ...
) -> _IndexT:
...
    @overload
    def set_names(self, names, *, level=..., inplace: Literal[True]) -> None:
...
    @overload
    def set_names(
self: _IndexT, names, *, level=..., inplace: bool = ...
) -> _IndexT | None:
...
def set_names(
self: _IndexT, names, *, level=None, inplace: bool = False
) -> _IndexT | None:
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label or dict-like for MultiIndex
Name(s) to set.
.. versionchanged:: 1.3.0
level : int, label or list of int or label, optional
If the index is a MultiIndex and names is not dict-like, level(s) to set
(None for all levels). Otherwise level must be None.
.. versionchanged:: 1.3.0
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.rename : Able to set new names without level.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter')
Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx = idx.set_names(['kind', 'year'])
>>> idx.set_names('species', level=0)
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
When renaming levels with a dict, levels can not be passed.
>>> idx.set_names({'kind': 'snake'})
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['snake', 'year'])
"""
if level is not None and not isinstance(self, ABCMultiIndex):
raise ValueError("Level must be None for non-MultiIndex")
if level is not None and not is_list_like(level) and is_list_like(names):
raise TypeError("Names must be a string when a single level is provided.")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if is_dict_like(names) and not isinstance(self, ABCMultiIndex):
raise TypeError("Can only pass dict-like as `names` for MultiIndex.")
if is_dict_like(names) and level is not None:
raise TypeError("Can not pass level for dictlike `names`.")
if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None:
# Transform dict to list of new names and corresponding levels
level, names_adjusted = [], []
for i, name in enumerate(self.names):
if name in names.keys():
level.append(i)
names_adjusted.append(names[name])
names = names_adjusted
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._view()
idx._set_names(names, level=level)
if not inplace:
return idx
return None
def rename(self, name, inplace: bool = False):
"""
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Length of names must match number of levels in MultiIndex.
Parameters
----------
name : label or list of labels
Name(s) to set.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.set_names : Able to set new names partially and by level.
Examples
--------
>>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')
>>> idx.rename('grade')
Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')
>>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]],
... names=['kind', 'year'])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['kind', 'year'])
>>> idx.rename(['species', 'year'])
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
>>> idx.rename('species')
Traceback (most recent call last):
TypeError: Must pass list-like as `names`.
"""
return self.set_names([name], inplace=inplace)
# --------------------------------------------------------------------
# Level-Centric Methods
    @property
    def nlevels(self) -> int:
"""
Number of levels.
"""
return 1
def _sort_levels_monotonic(self: _IndexT) -> _IndexT:
"""
Compat with MultiIndex.
"""
return self
def _validate_index_level(self, level) -> None:
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(
"Too many levels: Index has only 1 level, "
f"{level} is not a valid level number"
)
if level > 0:
raise IndexError(
f"Too many levels: Index has only 1 level, not {level + 1}"
)
elif level != self.name:
raise KeyError(
f"Requested level ({level}) does not match index name ({self.name})"
)
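    # Sketch of the validation above for a flat Index (not part of the original
    # module): level 0 (or the index's own name) is a no-op, anything else raises.
    #
    #   >>> pd.Index(['a', 'b'])._validate_index_level(0)
    #   >>> pd.Index(['a', 'b'])._validate_index_level(1)
    #   Traceback (most recent call last):
    #   IndexError: Too many levels: Index has only 1 level, not 2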
def _get_level_number(self, level) -> int:
self._validate_index_level(level)
return 0
def sortlevel(
self, level=None, ascending: bool | list[bool] = True, sort_remaining=None
):
"""
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : bool, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
Index
"""
if not isinstance(ascending, (list, bool)):
raise TypeError(
"ascending must be a single bool value or"
"a list of bool values of length 1"
)
if isinstance(ascending, list):
if len(ascending) != 1:
raise TypeError("ascending must be a list of bool values of length 1")
ascending = ascending[0]
if not isinstance(ascending, bool):
raise TypeError("ascending must be a bool value")
return self.sort_values(return_indexer=True, ascending=ascending)
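    # Usage sketch for ``sortlevel`` (a sketch; the second element of the returned
    # tuple is the integer indexer and its repr is platform dependent):
    #
    #   >>> idx = pd.Index([3, 1, 2])
    #   >>> idx.sortlevel(ascending=True)[0]
    #   Index([1, 2, 3], dtype='int64')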
def _get_level_values(self, level) -> Index:
"""
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
MultiIndex.get_level_values : Get values for a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> idx = pd.Index(list('abc'))
>>> idx
Index(['a', 'b', 'c'], dtype='object')
Get level values by supplying `level` as integer:
>>> idx.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object')
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
def droplevel(self, level: IndexLabel = 0):
"""
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex. The original index is not modified inplace.
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
Index or MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.droplevel()
MultiIndex([(3, 5),
(4, 6)],
names=['y', 'z'])
>>> mi.droplevel(2)
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel('z')
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel(['x', 'y'])
Index([5, 6], dtype='int64', name='z')
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
return self._drop_level_numbers(levnums)
def _drop_level_numbers(self, levnums: list[int]):
"""
Drop MultiIndex levels by level _number_, not name.
"""
if not levnums and not isinstance(self, ABCMultiIndex):
return self
if len(levnums) >= self.nlevels:
raise ValueError(
f"Cannot remove {len(levnums)} levels from an index with "
f"{self.nlevels} levels: at least one level must be left."
)
# The two checks above guarantee that here self is a MultiIndex
self = cast("MultiIndex", self)
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_codes.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
lev = new_levels[0]
if len(lev) == 0:
# If lev is empty, lev.take will fail GH#42055
if len(new_codes[0]) == 0:
# GH#45230 preserve RangeIndex here
# see test_reset_index_empty_rangeindex
result = lev[:0]
else:
res_values = algos.take(lev._values, new_codes[0], allow_fill=True)
# _constructor instead of type(lev) for RangeIndex compat GH#35230
result = lev._constructor._simple_new(res_values, name=new_names[0])
else:
# set nan if needed
mask = new_codes[0] == -1
result = new_levels[0].take(new_codes[0])
if mask.any():
result = result.putmask(mask, np.nan)
result._name = new_names[0]
return result
else:
from pandas.core.indexes.multi import MultiIndex
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=new_names,
verify_integrity=False,
)
# --------------------------------------------------------------------
# Introspection Methods
    @property
    def _can_hold_na(self) -> bool:
if isinstance(self.dtype, ExtensionDtype):
if isinstance(self.dtype, IntervalDtype):
# FIXME(GH#45720): this is inaccurate for integer-backed
# IntervalArray, but without it other.categories.take raises
# in IntervalArray._cmp_method
return True
return self.dtype._can_hold_na
if self.dtype.kind in ["i", "u", "b"]:
return False
return True
    @property
    def is_monotonic_increasing(self) -> bool:
"""
Return a boolean if the values are equal or increasing.
Returns
-------
bool
See Also
--------
Index.is_monotonic_decreasing : Check if the values are equal or decreasing.
Examples
--------
>>> pd.Index([1, 2, 3]).is_monotonic_increasing
True
>>> pd.Index([1, 2, 2]).is_monotonic_increasing
True
>>> pd.Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
    @property
    def is_monotonic_decreasing(self) -> bool:
"""
Return a boolean if the values are equal or decreasing.
Returns
-------
bool
See Also
--------
Index.is_monotonic_increasing : Check if the values are equal or increasing.
Examples
--------
>>> pd.Index([3, 2, 1]).is_monotonic_decreasing
True
>>> pd.Index([3, 2, 2]).is_monotonic_decreasing
True
>>> pd.Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
    @property
    def _is_strictly_monotonic_increasing(self) -> bool:
"""
Return if the index is strictly monotonic increasing
(only increasing) values.
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
    @property
    def _is_strictly_monotonic_decreasing(self) -> bool:
"""
Return if the index is strictly monotonic decreasing
(only decreasing) values.
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
    @property
    def is_unique(self) -> bool:
"""
Return if the index has unique values.
Returns
-------
bool
See Also
--------
Index.has_duplicates : Inverse method that checks if it has duplicate values.
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.is_unique
False
>>> idx = pd.Index([1, 5, 7])
>>> idx.is_unique
True
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_unique
False
>>> idx = pd.Index(["Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_unique
True
"""
return self._engine.is_unique
    @property
    def has_duplicates(self) -> bool:
"""
Check if the Index has duplicate values.
Returns
-------
bool
Whether or not the Index has duplicate values.
See Also
--------
Index.is_unique : Inverse method that checks if it has unique values.
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.has_duplicates
True
>>> idx = pd.Index([1, 5, 7])
>>> idx.has_duplicates
False
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.has_duplicates
True
>>> idx = pd.Index(["Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.has_duplicates
False
"""
return not self.is_unique
def is_boolean(self) -> bool:
"""
Check if the Index only consists of booleans.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_bool_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of booleans.
See Also
--------
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype (deprecated).
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([True, False, True])
>>> idx.is_boolean() # doctest: +SKIP
True
>>> idx = pd.Index(["True", "False", "True"])
>>> idx.is_boolean() # doctest: +SKIP
False
>>> idx = pd.Index([True, False, "True"])
>>> idx.is_boolean() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_boolean is deprecated. "
"Use pandas.api.types.is_bool_type instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["boolean"]
def is_integer(self) -> bool:
"""
Check if the Index only consists of integers.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_integer_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of integers.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_integer() # doctest: +SKIP
True
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_integer() # doctest: +SKIP
False
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_integer() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_integer is deprecated. "
"Use pandas.api.types.is_integer_dtype instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["integer"]
def is_floating(self) -> bool:
"""
Check if the Index is a floating type.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_float_dtype` instead
The Index may consist of only floats, NaNs, or a mix of floats,
integers, or NaNs.
Returns
-------
bool
            Whether or not the Index only consists of floats, NaNs, or
            a mix of floats, integers, or NaNs.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
>>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4, np.nan])
>>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_floating() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_floating is deprecated. "
"Use pandas.api.types.is_float_dtype instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]
def is_numeric(self) -> bool:
"""
Check if the Index only consists of numeric data.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_numeric_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of numeric data.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
>>> idx.is_numeric() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_numeric is deprecated. "
"Use pandas.api.types.is_any_real_numeric_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["integer", "floating"]
def is_object(self) -> bool:
"""
Check if the Index is of the object dtype.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_object_dtype` instead.
Returns
-------
bool
Whether or not the Index is of the object dtype.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_object() # doctest: +SKIP
True
>>> idx = pd.Index(["Apple", "Mango", 2.0])
>>> idx.is_object() # doctest: +SKIP
True
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_object() # doctest: +SKIP
False
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_object() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_object is deprecated."
"Use pandas.api.types.is_object_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return is_object_dtype(self.dtype)
def is_categorical(self) -> bool:
"""
Check if the Index holds categorical data.
.. deprecated:: 2.0.0
Use :meth:`pandas.api.types.is_categorical_dtype` instead.
Returns
-------
bool
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical() # doctest: +SKIP
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical() # doctest: +SKIP
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_categorical is deprecated."
"Use pandas.api.types.is_categorical_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["categorical"]
def is_interval(self) -> bool:
"""
Check if the Index holds Interval objects.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_interval_dtype` instead.
Returns
-------
bool
Whether or not the Index holds Interval objects.
See Also
--------
IntervalIndex : Index for Interval objects.
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
Examples
--------
>>> idx = pd.Index([pd.Interval(left=0, right=5),
... pd.Interval(left=5, right=10)])
>>> idx.is_interval() # doctest: +SKIP
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_interval() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_interval is deprecated."
"Use pandas.api.types.is_interval_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["interval"]
def _holds_integer(self) -> bool:
"""
Whether the type is an integer type.
"""
return self.inferred_type in ["integer", "mixed-integer"]
def holds_integer(self) -> bool:
"""
Whether the type is an integer type.
.. deprecated:: 2.0.0
Use `pandas.api.types.infer_dtype` instead
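Examples
--------
A minimal illustration; the result reflects ``Index.inferred_type``, and the
call emits a ``FutureWarning``, so the doctest is skipped.
>>> idx = pd.Index([1, 2, 3])
>>> idx.holds_integer()  # doctest: +SKIP
True
>>> pd.Index([1.0, 2.0]).holds_integer()  # doctest: +SKIP
False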
"""
warnings.warn(
f"{type(self).__name__}.holds_integer is deprecated. "
"Use pandas.api.types.infer_dtype instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._holds_integer()
@cache_readonly
def inferred_type(self) -> str_t:
"""
Return a string of the type inferred from the values.
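Examples
--------
For instance, an all-integer Index is inferred as ``'integer'``:
>>> idx = pd.Index([1, 2, 3])
>>> idx.inferred_type
'integer'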
"""
return lib.infer_dtype(self._values, skipna=False)
@cache_readonly
def _is_all_dates(self) -> bool:
"""
Whether or not the index values only consist of dates.
"""
if needs_i8_conversion(self.dtype):
return True
elif self.dtype != _dtype_obj:
# TODO(ExtensionIndex): 3rd party EA might override?
# Note: this includes IntervalIndex, even when the left/right
# contain datetime-like objects.
return False
elif self._is_multi:
return False
return is_datetime_array(ensure_object(self._values))
@cache_readonly
def _is_multi(self) -> bool:
"""
Cached check equivalent to isinstance(self, MultiIndex)
"""
return isinstance(self, ABCMultiIndex)
# --------------------------------------------------------------------
# Pickle Methods
def __reduce__(self):
d = {"data": self._data, "name": self.name}
return _new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Null Handling Methods
@cache_readonly
def _na_value(self):
"""The expected NA value to use with this index."""
dtype = self.dtype
if isinstance(dtype, np.dtype):
if dtype.kind in ["m", "M"]:
return NaT
return np.nan
return dtype.na_value
@cache_readonly
def _isnan(self) -> npt.NDArray[np.bool_]:
"""
Return if each value is NaN.
"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach this condition; callers check hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def hasnans(self) -> bool:
"""
Return True if there are any NaNs.
Enables various performance speedups.
Returns
-------
bool
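Examples
--------
For instance:
>>> idx = pd.Index([1, 2, 3, None])
>>> idx.hasnans
True
>>> pd.Index([1, 2, 3]).hasnans
False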
"""
if self._can_hold_na:
return bool(self._isnan.any())
else:
return False
def isna(self) -> npt.NDArray[np.bool_]:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else gets mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
numpy.ndarray[bool]
A boolean array of whether my values are NA.
See Also
--------
Index.notna : Boolean inverse of isna.
Index.dropna : Omit entries with missing values.
isna : Top-level isna.
Series.isna : Detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True])
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True])
"""
return self._isnan
isnull = isna
def notna(self) -> npt.NDArray[np.bool_]:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
Returns
-------
numpy.ndarray[bool]
Boolean array to indicate which entries are not NA.
See Also
--------
Index.notnull : Alias of notna.
Index.isna: Inverse of notna.
notna : Top-level notna.
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def fillna(self, value=None, downcast=None):
"""
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-like.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
Index
See Also
--------
DataFrame.fillna : Fill NaN values of a DataFrame.
Series.fillna : Fill NaN Values of a Series.
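Examples
--------
For instance, filling missing entries with a scalar:
>>> idx = pd.Index([np.nan, np.nan, 3.0])
>>> idx.fillna(0)
Index([0.0, 0.0, 3.0], dtype='float64')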
"""
value = self._require_scalar(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care metadata other than name
# because it can't have freq if it has NaTs
# _with_infer needed for test_fillna_categorical
return Index._with_infer(result, name=self.name)
raise NotImplementedError(
f"{type(self).__name__}.fillna does not support 'downcast' "
"argument values other than 'None'."
)
return self._view()
def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT:
"""
Return Index without NA/NaN values.
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
Index
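Examples
--------
For instance:
>>> idx = pd.Index([1.0, np.nan, 3.0])
>>> idx.dropna()
Index([1.0, 3.0], dtype='float64')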
"""
if how not in ("any", "all"):
raise ValueError(f"invalid how option: {how}")
if self.hasnans:
res_values = self._values[~self._isnan]
return type(self)._simple_new(res_values, name=self.name)
return self._view()
# --------------------------------------------------------------------
# Uniqueness Methods
def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT:
"""
Return unique values in the index.
Unique values are returned in order of appearance, this does NOT sort.
Parameters
----------
level : int or hashable, optional
Only return values from specified level (for MultiIndex).
If int, gets the level by integer position, else by level name.
Returns
-------
Index
See Also
--------
unique : Numpy array of unique values in that column.
Series.unique : Return unique values of Series object.
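Examples
--------
For instance:
>>> idx = pd.Index([1, 5, 7, 7, 5])
>>> idx.unique()
Index([1, 5, 7], dtype='int64')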
"""
if level is not None:
self._validate_index_level(level)
if self.is_unique:
return self._view()
result = super().unique()
return self._shallow_copy(result)
def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT:
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
Index
See Also
--------
Series.drop_duplicates : Equivalent method on Series.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Index.duplicated : Related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
if self.is_unique:
return self._view()
return super().drop_duplicates(keep=keep)
def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
np.ndarray[bool]
See Also
--------
Series.duplicated : Equivalent method on pandas.Series.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Index.drop_duplicates : Remove duplicate values from Index.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
"""
if self.is_unique:
# fastpath available bc we are immutable
return np.zeros(len(self), dtype=bool)
return self._duplicated(keep=keep)
# --------------------------------------------------------------------
# Arithmetic & Logical Methods
def __iadd__(self, other):
# alias for __add__
return self + other
def __nonzero__(self) -> NoReturn:
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
# --------------------------------------------------------------------
# Set Operation Methods
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = get_op_result_name(self, other)
if self.name is not name:
return self.rename(name)
return self
def _validate_sort_keyword(self, sort):
if sort not in [None, False, True]:
raise ValueError(
"The 'sort' keyword only takes the values of "
f"None, True, or False; {sort} was passed."
)
def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]:
"""
With mismatched timezones, cast both to UTC.
"""
# Caller is responsible for checking
# `not is_dtype_equal(self.dtype, other.dtype)`
if (
isinstance(self, ABCDatetimeIndex)
and isinstance(other, ABCDatetimeIndex)
and self.tz is not None
and other.tz is not None
):
# GH#39328, GH#45357
left = self.tz_convert("UTC")
right = other.tz_convert("UTC")
return left, right
return self, other
def union(self, other, sort=None):
"""
Form the union of two Index objects.
If the Index objects are incompatible, both Index objects will be
cast to dtype('object') first.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` or `other` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Examples
--------
Union matching dtypes
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Index([1, 2, 3, 4, 5, 6], dtype='int64')
Union mismatched dtypes
>>> idx1 = pd.Index(['a', 'b', 'c', 'd'])
>>> idx2 = pd.Index([1, 2, 3, 4])
>>> idx1.union(idx2)
Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')
MultiIndex case
>>> idx1 = pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx2 = pd.MultiIndex.from_arrays(
... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
... )
>>> idx2
MultiIndex([(3, 'Red'),
(3, 'Green'),
(2, 'Red'),
(2, 'Green')],
)
>>> idx1.union(idx2)
MultiIndex([(1, 'Blue'),
(1, 'Red'),
(2, 'Blue'),
(2, 'Green'),
(2, 'Red'),
(3, 'Green'),
(3, 'Red')],
)
>>> idx1.union(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue'),
(3, 'Red'),
(3, 'Green'),
(2, 'Green')],
)
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not is_dtype_equal(self.dtype, other.dtype):
if (
isinstance(self, ABCMultiIndex)
and not is_object_dtype(_unpack_nested_dtype(other))
and len(other) > 0
):
raise NotImplementedError(
"Can only union MultiIndex with MultiIndex or Index of tuples, "
"try mi.to_flat_index().union(other) instead."
)
self, other = self._dti_setop_align_tzs(other, "union")
dtype = self._find_common_type_compat(other)
left = self.astype(dtype, copy=False)
right = other.astype(dtype, copy=False)
return left.union(right, sort=sort)
elif not len(other) or self.equals(other):
# NB: whether this (and the `if not len(self)` check below) come before
# or after the is_dtype_equal check above affects the returned dtype
result = self._get_reconciled_name_object(other)
if sort is True:
return result.sort_values()
return result
elif not len(self):
result = other._get_reconciled_name_object(self)
if sort is True:
return result.sort_values()
return result
result = self._union(other, sort=sort)
return self._wrap_setop_result(other, result)
def _union(self, other: Index, sort):
"""
Specific union logic should go here. In subclasses, union behavior
should be overridden here rather than in `self.union`.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
Returns
-------
Index
"""
lvals = self._values
rvals = other._values
if (
sort is None
and self.is_monotonic_increasing
and other.is_monotonic_increasing
and not (self.has_duplicates and other.has_duplicates)
and self._can_use_libjoin
):
# Both are monotonic and at least one is unique, so can use outer join
# (actually don't need either unique, but without this restriction
# test_union_same_value_duplicated_in_both fails)
try:
return self._outer_indexer(other)[0]
except (TypeError, IncompatibleFrequency):
# incomparable objects; should only be for object dtype
value_list = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
value_list.extend([x for x in rvals if x not in value_set])
# If objects are unorderable, we must have object dtype.
return np.array(value_list, dtype=object)
elif not other.is_unique:
# other has duplicates
result_dups = algos.union_with_duplicates(self, other)
return _maybe_try_sort(result_dups, sort)
# The rest of this method is analogous to Index._intersection_via_get_indexer
# Self may have duplicates; other already checked as unique
# find indexes of things in "other" that are not in "self"
if self._index_as_unique:
indexer = self.get_indexer(other)
missing = (indexer == -1).nonzero()[0]
else:
missing = algos.unique1d(self.get_indexer_non_unique(other)[1])
result: Index | MultiIndex | ArrayLike
if self._is_multi:
# Preserve MultiIndex to avoid losing dtypes
result = self.append(other.take(missing))
else:
if len(missing) > 0:
other_diff = rvals.take(missing)
result = concat_compat((lvals, other_diff))
else:
result = lvals
if not self.is_monotonic_increasing or not other.is_monotonic_increasing:
# if both are monotonic then result should already be sorted
result = _maybe_try_sort(result, sort)
return result
def _wrap_setop_result(self, other: Index, result) -> Index:
name = get_op_result_name(self, other)
if isinstance(result, Index):
if result.name != name:
result = result.rename(name)
else:
result = self._shallow_copy(result, name=name)
return result
def intersection(self, other, sort: bool = False):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
sort : True, False or None, default False
Whether to sort the resulting index.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
* False : do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Index([3, 4], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not is_dtype_equal(self.dtype, other.dtype):
self, other = self._dti_setop_align_tzs(other, "intersection")
if self.equals(other):
if self.has_duplicates:
result = self.unique()._get_reconciled_name_object(other)
else:
result = self._get_reconciled_name_object(other)
if sort is True:
result = result.sort_values()
return result
if len(self) == 0 or len(other) == 0:
# fastpath; we need to be careful about having commutativity
if self._is_multi or other._is_multi:
# _convert_can_do_setop ensures that we have both or neither
# We retain self.levels
return self[:0].rename(result_name)
dtype = self._find_common_type_compat(other)
if is_dtype_equal(self.dtype, dtype):
# Slicing allows us to retain DTI/TDI.freq, RangeIndex
# Note: self[:0] vs other[:0] affects
# 1) which index's `freq` we get in DTI/TDI cases
# This may be a historical artifact, i.e. no documented
# reason for this choice.
# 2) The `step` we get in RangeIndex cases
if len(self) == 0:
return self[:0].rename(result_name)
else:
return other[:0].rename(result_name)
return Index([], dtype=dtype, name=result_name)
elif not self._should_compare(other):
# We can infer that the intersection is empty.
if isinstance(self, ABCMultiIndex):
return self[:0].rename(result_name)
return Index([], name=result_name)
elif not is_dtype_equal(self.dtype, other.dtype):
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
other = other.astype(dtype, copy=False)
return this.intersection(other, sort=sort)
result = self._intersection(other, sort=sort)
return self._wrap_intersection_result(other, result)
def _intersection(self, other: Index, sort: bool = False):
"""
intersection specialized to the case with matching dtypes.
"""
if (
self.is_monotonic_increasing
and other.is_monotonic_increasing
and self._can_use_libjoin
and not isinstance(self, ABCMultiIndex)
):
try:
res_indexer, indexer, _ = self._inner_indexer(other)
except TypeError:
# non-comparable; should only be for object dtype
pass
else:
# TODO: algos.unique1d should preserve DTA/TDA
if is_numeric_dtype(self):
# This is faster, because Index.unique() checks for uniqueness
# before calculating the unique values.
res = algos.unique1d(res_indexer)
else:
result = self.take(indexer)
res = result.drop_duplicates()
return ensure_wrapped_if_datetimelike(res)
res_values = self._intersection_via_get_indexer(other, sort=sort)
res_values = _maybe_try_sort(res_values, sort)
return res_values
def _wrap_intersection_result(self, other, result):
# We will override for MultiIndex to handle empty results
return self._wrap_setop_result(other, result)
def _intersection_via_get_indexer(
self, other: Index | MultiIndex, sort
) -> ArrayLike | MultiIndex:
"""
Find the intersection of two Indexes using get_indexer.
Returns
-------
np.ndarray or ExtensionArray
The returned array will be unique.
"""
left_unique = self.unique()
right_unique = other.unique()
# even though we are unique, we need get_indexer_for for IntervalIndex
indexer = left_unique.get_indexer_for(right_unique)
mask = indexer != -1
taker = indexer.take(mask.nonzero()[0])
if sort is False:
# sort bc we want the elements in the same order they are in self
# unnecessary in the case with sort=None bc we will sort later
taker = np.sort(taker)
if isinstance(left_unique, ABCMultiIndex):
result = left_unique.take(taker)
else:
result = left_unique.take(taker)._values
return result
def difference(self, other, sort=None):
"""
Return a new Index with elements of index not in `other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Examples
--------
>>> idx1 = pd.Index([2, 1, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Index([2, 1], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
# Note: we do NOT call _dti_setop_align_tzs here, as there
# is no requirement that .difference be commutative, so it does
# not cast to object.
if self.equals(other):
# Note: we do not (yet) sort even if sort=None GH#24959
return self[:0].rename(result_name)
if len(other) == 0:
# Note: we do not (yet) sort even if sort=None GH#24959
result = self.rename(result_name)
if sort is True:
return result.sort_values()
return result
if not self._should_compare(other):
# Nothing matches -> difference is everything
result = self.rename(result_name)
if sort is True:
return result.sort_values()
return result
result = self._difference(other, sort=sort)
return self._wrap_difference_result(other, result)
def _difference(self, other, sort):
# overridden by RangeIndex
this = self.unique()
indexer = this.get_indexer_for(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
the_diff: MultiIndex | ArrayLike
if isinstance(this, ABCMultiIndex):
the_diff = this.take(label_diff)
else:
the_diff = this._values.take(label_diff)
the_diff = _maybe_try_sort(the_diff, sort)
return the_diff
def _wrap_difference_result(self, other, result):
# We will override for MultiIndex to handle empty results
return self._wrap_setop_result(other, result)
def symmetric_difference(self, other, result_name=None, sort=None):
"""
Compute the symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
result_name : str
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Index([1, 5], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
if not is_dtype_equal(self.dtype, other.dtype):
self, other = self._dti_setop_align_tzs(other, "symmetric_difference")
if not self._should_compare(other):
return self.union(other, sort=sort).rename(result_name)
elif not is_dtype_equal(self.dtype, other.dtype):
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
that = other.astype(dtype, copy=False)
return this.symmetric_difference(that, sort=sort).rename(result_name)
this = self.unique()
other = other.unique()
indexer = this.get_indexer_for(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(
np.arange(this.size), common_indexer, assume_unique=True
)
left_diff = this.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.take(right_indexer)
res_values = left_diff.append(right_diff)
result = _maybe_try_sort(res_values, sort)
if not self._is_multi:
return Index(result, name=result_name, dtype=res_values.dtype)
else:
left_diff = cast("MultiIndex", left_diff)
if len(result) == 0:
# result might be an Index, if other was an Index
return left_diff.remove_unused_levels().set_names(result_name)
return result.set_names(result_name)
def _assert_can_do_setop(self, other) -> bool:
if not is_list_like(other):
raise TypeError("Input must be Index or array-like")
return True
def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = get_op_result_name(self, other)
return other, result_name
# --------------------------------------------------------------------
# Indexing Methods
def get_loc(self, key):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
Returns
-------
int if unique index, slice if monotonic index, else mask
Examples
--------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True])
"""
casted_key = self._maybe_cast_indexer(key)
try:
return self._engine.get_loc(casted_key)
except KeyError as err:
raise KeyError(key) from err
except TypeError:
# If we have a listlike key, _check_indexing_error will raise
# InvalidIndexError. Otherwise we fall through and re-raise
# the TypeError.
self._check_indexing_error(key)
raise
_index_shared_docs[
"get_indexer"
] = """
Compute indexer and mask for new index given the current index.
The indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
%(raises_section)s
Notes
-----
Returns -1 for unmatched values, for further explanation see the
example below.
Examples
--------
>>> index = pd.Index(['c', 'a', 'b'])
>>> index.get_indexer(['a', 'b', 'x'])
array([ 1, 2, -1])
Notice that the return value is an array of locations in ``index``
and ``x`` is marked by -1, as it is not in ``index``.
"""
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(
self,
target,
method: str_t | None = None,
limit: int | None = None,
tolerance=None,
) -> npt.NDArray[np.intp]:
method = clean_reindex_fill_method(method)
orig_target = target
target = self._maybe_cast_listlike_indexer(target)
self._check_indexing_method(method, limit, tolerance)
if not self._index_as_unique:
raise InvalidIndexError(self._requires_unique_msg)
if len(target) == 0:
return np.array([], dtype=np.intp)
if not self._should_compare(target) and not self._should_partial_index(target):
# IntervalIndex get special treatment bc numeric scalars can be
# matched to Interval scalars
return self._get_indexer_non_comparable(target, method=method, unique=True)
if is_categorical_dtype(self.dtype):
# _maybe_cast_listlike_indexer ensures target has our dtype
# (could improve perf by doing _should_compare check earlier?)
assert is_dtype_equal(self.dtype, target.dtype)
indexer = self._engine.get_indexer(target.codes)
if self.hasnans and target.hasnans:
# After _maybe_cast_listlike_indexer, target elements which do not
# belong to some category are changed to NaNs
# Mask to track actual NaN values compared to inserted NaN values
# GH#45361
target_nans = isna(orig_target)
loc = self.get_loc(np.nan)
mask = target.isna()
indexer[target_nans] = loc
indexer[mask & ~target_nans] = -1
return indexer
if is_categorical_dtype(target.dtype):
# potential fastpath
# get an indexer for unique categories then propagate to codes via take_nd
# get_indexer instead of _get_indexer needed for MultiIndex cases
# e.g. test_append_different_columns_types
categories_indexer = self.get_indexer(target.categories)
indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1)
if (not self._is_multi and self.hasnans) and target.hasnans:
# Exclude MultiIndex because hasnans raises NotImplementedError
# we should only get here if we are unique, so loc is an integer
# GH#41934
loc = self.get_loc(np.nan)
mask = target.isna()
indexer[mask] = loc
return ensure_platform_int(indexer)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(
ptarget, method=method, limit=limit, tolerance=tolerance
)
if is_dtype_equal(self.dtype, target.dtype) and self.equals(target):
# Only call equals if we have same dtype to avoid inference/casting
return np.arange(len(target), dtype=np.intp)
if not is_dtype_equal(
self.dtype, target.dtype
) and not self._should_partial_index(target):
# _should_partial_index e.g. IntervalIndex with numeric scalars
# that can be matched to Interval scalars.
dtype = self._find_common_type_compat(target)
this = self.astype(dtype, copy=False)
target = target.astype(dtype, copy=False)
return this._get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
return self._get_indexer(target, method, limit, tolerance)
def _get_indexer(
self,
target: Index,
method: str_t | None = None,
limit: int | None = None,
tolerance=None,
) -> npt.NDArray[np.intp]:
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
if method in ["pad", "backfill"]:
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == "nearest":
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if target._is_multi and self._is_multi:
engine = self._engine
# error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]"
# has no attribute "_extract_level_codes"
tgt_values = engine._extract_level_codes( # type: ignore[union-attr]
target
)
else:
tgt_values = target._get_engine_target()
indexer = self._engine.get_indexer(tgt_values)
return ensure_platform_int(indexer)
def _should_partial_index(self, target: Index) -> bool:
"""
Should we attempt partial-matching indexing?
"""
if is_interval_dtype(self.dtype):
if is_interval_dtype(target.dtype):
return False
# See https://github.com/pandas-dev/pandas/issues/47772 the commented
# out code can be restored (instead of hardcoding `return True`)
# once that issue is fixed
# "Index" has no attribute "left"
# return self.left._should_compare(target) # type: ignore[attr-defined]
return True
return False
def _check_indexing_method(
self,
method: str_t | None,
limit: int | None = None,
tolerance=None,
) -> None:
"""
Raise if we have a get_indexer `method` that is not supported or valid.
"""
if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]:
# in practice the clean_reindex_fill_method call would raise
# before we get here
raise ValueError("Invalid fill method") # pragma: no cover
if self._is_multi:
if method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
if method in ("pad", "backfill"):
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype):
# GH#37871 for now this is only for IntervalIndex and CategoricalIndex
if method is not None:
raise NotImplementedError(
f"method {method} not yet implemented for {type(self).__name__}"
)
if method is None:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if doing pad, "
"backfill or nearest reindexing"
)
if limit is not None:
raise ValueError(
"limit argument only valid if doing pad, "
"backfill or nearest reindexing"
)
def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray:
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
f"tolerance argument for {type(self).__name__} with dtype "
f"{self.dtype} must contain numeric elements if it is list type"
)
raise ValueError(
f"tolerance argument for {type(self).__name__} with dtype {self.dtype} "
f"must be numeric if it is a scalar: {repr(tolerance)}"
)
return tolerance
def _get_fill_indexer(
self, target: Index, method: str_t, limit: int | None = None, tolerance=None
) -> npt.NDArray[np.intp]:
if self._is_multi:
# TODO: get_indexer_with_fill docstring says values must be _sorted_
# but that doesn't appear to be enforced
# error: "IndexEngine" has no attribute "get_indexer_with_fill"
engine = self._engine
with warnings.catch_warnings():
# TODO: We need to fix this. Casting to int64 in cython
warnings.filterwarnings("ignore", category=RuntimeWarning)
return engine.get_indexer_with_fill( # type: ignore[union-attr]
target=target._values,
values=self._values,
method=method,
limit=limit,
)
if self.is_monotonic_increasing and target.is_monotonic_increasing:
target_values = target._get_engine_target()
own_values = self._get_engine_target()
if not isinstance(target_values, np.ndarray) or not isinstance(
own_values, np.ndarray
):
raise NotImplementedError
if method == "pad":
indexer = libalgos.pad(own_values, target_values, limit=limit)
else:
# i.e. "backfill"
indexer = libalgos.backfill(own_values, target_values, limit=limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method, limit)
if tolerance is not None and len(self):
indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
return indexer
def _get_fill_indexer_searchsorted(
self, target: Index, method: str_t, limit: int | None = None
) -> npt.NDArray[np.intp]:
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
"""
if limit is not None:
raise ValueError(
f"limit argument for {repr(method)} method only well-defined "
"if index and target are monotonic"
)
side: Literal["left", "right"] = "left" if method == "pad" else "right"
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = indexer == -1
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
if side == "left":
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also maps not-found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(
self, target: Index, limit: int | None, tolerance
) -> npt.NDArray[np.intp]:
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
if not len(self):
return self._get_fill_indexer(target, "pad")
left_indexer = self.get_indexer(target, "pad", limit=limit)
right_indexer = self.get_indexer(target, "backfill", limit=limit)
left_distances = self._difference_compat(target, left_indexer)
right_distances = self._difference_compat(target, right_indexer)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(
# error: Argument 1&2 has incompatible type "Union[ExtensionArray,
# ndarray[Any, Any]]"; expected "Union[SupportsDunderLE,
# SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]"
op(left_distances, right_distances) # type: ignore[arg-type]
| (right_indexer == -1),
left_indexer,
right_indexer,
)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
return indexer
def _filter_indexer_tolerance(
self,
target: Index,
indexer: npt.NDArray[np.intp],
tolerance,
) -> npt.NDArray[np.intp]:
distance = self._difference_compat(target, indexer)
return np.where(distance <= tolerance, indexer, -1)
def _difference_compat(
self, target: Index, indexer: npt.NDArray[np.intp]
) -> ArrayLike:
# Compatibility for PeriodArray, for which __sub__ returns an ndarray[object]
# of DateOffset objects, which do not support __abs__ (and would be slow
# if they did)
if isinstance(self.dtype, PeriodDtype):
# Note: we only get here with matching dtypes
own_values = cast("PeriodArray", self._data)._ndarray
target_values = cast("PeriodArray", target._data)._ndarray
diff = own_values[indexer] - target_values
else:
# error: Unsupported left operand type for - ("ExtensionArray")
diff = self._values[indexer] - target._values # type: ignore[operator]
return abs(diff)
# --------------------------------------------------------------------
# Indexer Conversion Methods
def _validate_positional_slice(self, key: slice) -> None:
"""
For positional indexing, a slice must have either int or None
for each of start, stop, and step.
"""
self._validate_indexer("positional", key.start, "iloc")
self._validate_indexer("positional", key.stop, "iloc")
self._validate_indexer("positional", key.step, "iloc")
def _convert_slice_indexer(self, key: slice, kind: str_t):
"""
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'loc', 'getitem'}
"""
assert kind in ["loc", "getitem"], kind
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able
# to simplify this.
if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype):
# We always treat __getitem__ slicing as label-based
# translate to locations
return self.slice_indexer(start, stop, step)
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_index_slice = is_int(start) and is_int(stop) and is_int(step)
# special case for interval_dtype bc we do not do partial-indexing
# on integer Intervals when slicing
# TODO: write this in terms of e.g. should_partial_index?
ints_are_positional = self._should_fallback_to_positional or is_interval_dtype(
self.dtype
)
is_positional = is_index_slice and ints_are_positional
if kind == "getitem":
# called from the getitem slicers, validate that we are in fact integers
if is_integer_dtype(self.dtype) or is_index_slice:
# Note: these checks are redundant if we know is_index_slice
self._validate_indexer("slice", key.start, "getitem")
self._validate_indexer("slice", key.stop, "getitem")
self._validate_indexer("slice", key.step, "getitem")
return key
# convert the slice to an indexer here
# if we are mixed and have integers
if is_positional:
try:
# Validate start & stop
if start is not None:
self.get_loc(start)
if stop is not None:
self.get_loc(stop)
is_positional = False
except KeyError:
pass
if com.is_null_slice(key):
# It doesn't matter if we are positional or label based
indexer = key
elif is_positional:
if kind == "loc":
# GH#16121, GH#24612, GH#31810
raise TypeError(
"Slicing a positional slice with .loc is not allowed, "
"Use .loc with labels or .iloc with positions instead.",
)
indexer = key
else:
indexer = self.slice_indexer(start, stop, step)
return indexer
def _raise_invalid_indexer(
self,
form: str_t,
key,
reraise: lib.NoDefault | None | Exception = lib.no_default,
) -> None:
"""
Raise consistent invalid indexer message.
"""
msg = (
f"cannot do {form} indexing on {type(self).__name__} with these "
f"indexers [{key}] of type {type(key).__name__}"
)
if reraise is not lib.no_default:
raise TypeError(msg) from reraise
raise TypeError(msg)
# --------------------------------------------------------------------
# Reindex Methods
def _validate_can_reindex(self, indexer: np.ndarray) -> None:
"""
Check if we are allowing reindexing with this particular indexer.
Parameters
----------
indexer : an integer ndarray
Raises
------
ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self._index_as_unique and len(indexer):
raise ValueError("cannot reindex on an axis with duplicate labels")
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> tuple[Index, npt.NDArray[np.intp] | None]:
"""
Create index with target's values.
Parameters
----------
target : an iterable
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
level : int, optional
Level of multiindex.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : int or float, optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
Raises
------
TypeError
If ``method`` passed along with ``level``.
ValueError
If non-unique multi-index
ValueError
If non-unique index and ``method`` or ``limit`` passed.
See Also
--------
Series.reindex : Conform Series to new index with optional filling logic.
DataFrame.reindex : Conform DataFrame to new index with optional filling logic.
Examples
--------
>>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.reindex(['car', 'bike'])
(Index(['car', 'bike'], dtype='object'), array([0, 1]))
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "name")
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
if level is not None and self._is_multi:
# "Index" has no attribute "levels"; maybe "nlevels"?
idx = self.levels[level] # type: ignore[attr-defined]
else:
idx = self
target = idx[:0]
else:
target = ensure_index(target)
if level is not None and (
isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex)
):
if method is not None:
raise TypeError("Fill method not supported if level passed")
# TODO: tests where passing `keep_order=not self._is_multi`
# makes a difference for non-MultiIndex case
target, indexer, _ = self._join_level(
target, level, how="right", keep_order=not self._is_multi
)
else:
if self.equals(target):
indexer = None
else:
if self._index_as_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
elif self._is_multi:
raise ValueError("cannot handle a non-unique multi-index!")
elif not self.is_unique:
# GH#42568
raise ValueError("cannot reindex on an axis with duplicate labels")
else:
indexer, _ = self.get_indexer_non_unique(target)
target = self._wrap_reindex_result(target, indexer, preserve_names)
return target, indexer
def _wrap_reindex_result(self, target, indexer, preserve_names: bool):
target = self._maybe_preserve_names(target, preserve_names)
return target
def _maybe_preserve_names(self, target: Index, preserve_names: bool):
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy(deep=False)
target.name = self.name
return target
def _reindex_non_unique(
self, target: Index
) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]:
"""
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp]
Indices of output values in original index.
new_indexer : np.ndarray[np.intp] or None
"""
target = ensure_index(target)
if len(target) == 0:
# GH#13691
return self[:0], np.array([], dtype=np.intp), None
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer), dtype=np.intp)
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = length[~check]
cur_labels = self.take(indexer[check]).values
cur_indexer = length[check]
# Index constructor below will do inference
new_labels = np.empty((len(indexer),), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# GH#38906
if not len(self):
new_indexer = np.arange(0, dtype=np.intp)
# a unique indexer
elif target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer), dtype=np.intp)
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)
new_indexer[~check] = -1
if not isinstance(self, ABCMultiIndex):
new_index = Index(new_labels, name=self.name)
else:
new_index = type(self).from_tuples(new_labels, names=self.names)
return new_index, indexer, new_indexer
# --------------------------------------------------------------------
# Join Methods
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: Literal[True],
sort: bool = ...,
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
...
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: Literal[False] = ...,
sort: bool = ...,
) -> Index:
...
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: bool = ...,
sort: bool = ...,
) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
...
@final
@_maybe_return_indexers
def join(
self,
other: Index,
*,
how: JoinHow = "left",
level: Level = None,
return_indexers: bool = False,
sort: bool = False,
) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
Compute join_index and indexers to conform data structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : bool, default False
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns
-------
join_index, (left_indexer, right_indexer)
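Examples
--------
A minimal sketch with the default ``return_indexers=False``; the exact
return shape depends on ``return_indexers``, so the output is skipped.
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([4, 5, 6])
>>> idx1.join(idx2, how='outer')  # doctest: +SKIP
Index([1, 2, 3, 4, 5, 6], dtype='int64')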
"""
other = ensure_index(other)
if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
if (self.tz is None) ^ (other.tz is None):
# Raise instead of casting to object below.
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
if not self._is_multi and not other._is_multi:
# We have specific handling for MultiIndex below
pself, pother = self._maybe_promote(other)
if pself is not self or pother is not other:
return pself.join(
pother, how=how, level=level, return_indexers=True, sort=sort
)
lindexer: np.ndarray | None
rindexer: np.ndarray | None
# try to figure out the join level
# GH3662
if level is None and (self._is_multi or other._is_multi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how)
# join on the level
if level is not None and (self._is_multi or other._is_multi):
return self._join_level(other, level, how=how)
if len(other) == 0:
if how in ("left", "outer"):
join_index = self._view()
rindexer = np.broadcast_to(np.intp(-1), len(join_index))
return join_index, None, rindexer
elif how in ("right", "inner", "cross"):
join_index = other._view()
lindexer = np.array([])
return join_index, lindexer, None
if len(self) == 0:
if how in ("right", "outer"):
join_index = other._view()
lindexer = np.broadcast_to(np.intp(-1), len(join_index))
return join_index, lindexer, None
elif how in ("left", "inner", "cross"):
join_index = self._view()
rindexer = np.array([])
return join_index, None, rindexer
if self._join_precedence < other._join_precedence:
flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
how = flip.get(how, how)
join_index, lidx, ridx = other.join(
self, how=how, level=level, return_indexers=True
)
lidx, ridx = ridx, lidx
return join_index, lidx, ridx
if not is_dtype_equal(self.dtype, other.dtype):
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
other = other.astype(dtype, copy=False)
return this.join(other, how=how, return_indexers=True)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic_increasing and other.is_monotonic_increasing:
if not is_interval_dtype(self.dtype):
# otherwise we will fall through to _join_via_get_indexer
# GH#39133
# go through object dtype for ea till engine is supported properly
return self._join_monotonic(other, how=how)
else:
return self._join_non_unique(other, how=how)
elif (
# GH48504: exclude MultiIndex to avoid going through MultiIndex._values
self.is_monotonic_increasing
and other.is_monotonic_increasing
and self._can_use_libjoin
and not isinstance(self, ABCMultiIndex)
and not is_categorical_dtype(self.dtype)
):
# Categorical is monotonic if data are ordered as categories, but join can
# not handle this in case of not lexicographically monotonic GH#38502
try:
return self._join_monotonic(other, how=how)
except TypeError:
# object dtype; non-comparable objects
pass
return self._join_via_get_indexer(other, how, sort)
def _join_via_get_indexer(
self, other: Index, how: JoinHow, sort: bool
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
# Fallback if we do not have any fastpaths available based on
# uniqueness/monotonicity
# Note: at this point we have checked matching dtypes
if how == "left":
join_index = self
elif how == "right":
join_index = other
elif how == "inner":
# TODO: sort=False here for backwards compat. It may
# be better to use the sort parameter passed into join
join_index = self.intersection(other, sort=False)
elif how == "outer":
# TODO: sort=True here for backwards compat. It may
# be better to use the sort parameter passed into join
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer_for(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer_for(join_index)
return join_index, lindexer, rindexer
def _join_multi(self, other: Index, how: JoinHow):
from pandas.core.indexes.multi import MultiIndex
from pandas.core.reshape.merge import restore_dropped_levels_multijoin
# figure out join names
self_names_list = list(com.not_none(*self.names))
other_names_list = list(com.not_none(*other.names))
self_names_order = self_names_list.index
other_names_order = other_names_list.index
self_names = set(self_names_list)
other_names = set(other_names_list)
overlap = self_names & other_names
# need at least 1 in common
if not overlap:
raise ValueError("cannot join with no overlapping index names")
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
# Drop the non-matching levels from left and right respectively
ldrop_names = sorted(self_names - overlap, key=self_names_order)
rdrop_names = sorted(other_names - overlap, key=other_names_order)
# if only the order differs
if not len(ldrop_names + rdrop_names):
self_jnlevels = self
other_jnlevels = other.reorder_levels(self.names)
else:
self_jnlevels = self.droplevel(ldrop_names)
other_jnlevels = other.droplevel(rdrop_names)
# Join left and right
# Join on same leveled multi-index frames is supported
join_idx, lidx, ridx = self_jnlevels.join(
other_jnlevels, how=how, return_indexers=True
)
# Restore the dropped levels
# Returned index level order is
# common levels, ldrop_names, rdrop_names
dropped_names = ldrop_names + rdrop_names
# error: Argument 5/6 to "restore_dropped_levels_multijoin" has
# incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any
# ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]"
levels, codes, names = restore_dropped_levels_multijoin(
self,
other,
dropped_names,
join_idx,
lidx, # type: ignore[arg-type]
ridx, # type: ignore[arg-type]
)
# Re-create the multi-index
multi_join_idx = MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=False
)
multi_join_idx = multi_join_idx.remove_unused_levels()
return multi_join_idx, lidx, ridx
jl = list(overlap)[0]
# Case where only one index is multi
# make the indices into mi's that match
flip_order = False
if isinstance(self, MultiIndex):
self, other = other, self
flip_order = True
# flip if join method is right or left
flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
how = flip.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how)
if flip_order:
return result[0], result[2], result[1]
return result
def _join_non_unique(
self, other: Index, how: JoinHow = "left"
) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
from pandas.core.reshape.merge import get_join_indexers
# We only get here if dtypes match
assert self.dtype == other.dtype
left_idx, right_idx = get_join_indexers(
[self._values], [other._values], how=how, sort=True
)
mask = left_idx == -1
join_idx = self.take(left_idx)
right = other.take(right_idx)
join_index = join_idx.putmask(mask, right)
return join_index, left_idx, right_idx
def _join_level(
self, other: Index, level, how: JoinHow = "left", keep_order: bool = True
) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex.
If ``keep_order == True``, the order of the data indexed by the
MultiIndex will not be changed; otherwise, it will tie out
with `other`.
"""
from pandas.core.indexes.multi import MultiIndex
def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:
"""
Returns sorter for the innermost level while preserving the
order of higher levels.
Parameters
----------
labels : list[np.ndarray]
Each ndarray has signed integer dtype, not necessarily identical.
Returns
-------
np.ndarray[np.intp]
"""
if labels[0].size == 0:
return np.empty(0, dtype=np.intp)
if len(labels) == 1:
return get_group_index_sorter(ensure_platform_int(labels[0]))
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = ensure_int64(labels[-1])
return lib.get_level_sorter(lab, ensure_platform_int(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError("Join on level between two MultiIndex objects is ambiguous")
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
how = flip.get(how, how)
assert isinstance(left, MultiIndex)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError(
"Index._join_level on non-unique index is not implemented"
)
new_level, left_lev_indexer, right_lev_indexer = old_level.join(
right, how=how, return_indexers=True
)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.codes[: level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = ensure_platform_int(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))
old_codes = left.codes[level]
taker = old_codes[old_codes != -1]
new_lev_codes = rev_indexer.take(taker)
new_codes = list(left.codes)
new_codes[level] = new_lev_codes
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
left_indexer = cast(np.ndarray, left_indexer)
mask = new_lev_codes != -1
if not mask.all():
new_codes = [lab[mask] for lab in new_codes]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max()
ngroups = 1 + max_new_lev
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_codes, ngroups
)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0] :]
new_codes = [lab[left_indexer] for lab in new_codes]
else: # sort the leaves
mask = new_lev_codes != -1
mask_all = mask.all()
if not mask_all:
new_codes = [lab[mask] for lab in new_codes]
left_indexer = _get_leaf_sorter(new_codes[: level + 1])
new_codes = [lab[left_indexer] for lab in new_codes]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(
levels=new_levels,
codes=new_codes,
names=left.names,
verify_integrity=False,
)
if right_lev_indexer is not None:
right_indexer = right_lev_indexer.take(join_index.codes[level])
else:
right_indexer = join_index.codes[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
left_indexer = (
None if left_indexer is None else ensure_platform_int(left_indexer)
)
right_indexer = (
None if right_indexer is None else ensure_platform_int(right_indexer)
)
return join_index, left_indexer, right_indexer
def _join_monotonic(
self, other: Index, how: JoinHow = "left"
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
# We only get here with matching dtypes and both monotonic increasing
assert other.dtype == self.dtype
if self.equals(other):
# This is a convenient place for this check, but its correctness
# does not depend on monotonicity, so it could go earlier
# in the calling method.
ret_index = other if how == "right" else self
return ret_index, None, None
ridx: npt.NDArray[np.intp] | None
lidx: npt.NDArray[np.intp] | None
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == "left":
join_index = self
lidx = None
ridx = self._left_indexer_unique(other)
elif how == "right":
join_index = other
lidx = other._left_indexer_unique(self)
ridx = None
elif how == "inner":
join_array, lidx, ridx = self._inner_indexer(other)
join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
elif how == "outer":
join_array, lidx, ridx = self._outer_indexer(other)
join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
else:
if how == "left":
join_array, lidx, ridx = self._left_indexer(other)
elif how == "right":
join_array, ridx, lidx = other._left_indexer(self)
elif how == "inner":
join_array, lidx, ridx = self._inner_indexer(other)
elif how == "outer":
join_array, lidx, ridx = self._outer_indexer(other)
assert lidx is not None
assert ridx is not None
join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
lidx = None if lidx is None else ensure_platform_int(lidx)
ridx = None if ridx is None else ensure_platform_int(ridx)
return join_index, lidx, ridx
def _wrap_joined_index(
self: _IndexT,
joined: ArrayLike,
other: _IndexT,
lidx: npt.NDArray[np.intp],
ridx: npt.NDArray[np.intp],
) -> _IndexT:
assert other.dtype == self.dtype
if isinstance(self, ABCMultiIndex):
name = self.names if self.names == other.names else None
# error: Incompatible return value type (got "MultiIndex",
# expected "_IndexT")
mask = lidx == -1
join_idx = self.take(lidx)
right = other.take(ridx)
join_index = join_idx.putmask(mask, right)
return join_index.set_names(name) # type: ignore[return-value]
else:
name = get_op_result_name(self, other)
return self._constructor._with_infer(joined, name=name, dtype=self.dtype)
@cache_readonly
def _can_use_libjoin(self) -> bool:
"""
Whether we can use the fastpaths implemented in _libs.join.
"""
if type(self) is Index:
# excludes EAs, but includes masked arrays; we get here with monotonic
# values only, meaning no NAs
return (
isinstance(self.dtype, np.dtype)
or isinstance(self.values, BaseMaskedArray)
or isinstance(self._values, ArrowExtensionArray)
)
return not is_interval_dtype(self.dtype)
# --------------------------------------------------------------------
# Uncategorized Methods
@property
def values(self) -> ArrayLike:
"""
Return an array representing the data in the Index.
.. warning::
We recommend using :attr:`Index.array` or
:meth:`Index.to_numpy`, depending on whether you need
a reference to the underlying data or a NumPy array.
Returns
-------
array: numpy.ndarray or ExtensionArray
See Also
--------
Index.array : Reference to the underlying data.
Index.to_numpy : A NumPy array representing the underlying data.
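Examples
--------
A small default Index illustrates the returned NumPy array:
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.values
array([1, 2, 3])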
"""
return self._data
@cache_readonly
def array(self) -> ExtensionArray:
array = self._data
if isinstance(array, np.ndarray):
from pandas.core.arrays.numpy_ import PandasArray
array = PandasArray(array)
return array
@cache_readonly
def _values(self) -> ExtensionArray | np.ndarray:
"""
The best array representation.
This is an ndarray or ExtensionArray.
``_values`` are consistent between ``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values |
----------------- | --------------- | ------------- |
Index | ndarray | ndarray |
CategoricalIndex | Categorical | Categorical |
DatetimeIndex | ndarray[M8ns] | DatetimeArray |
DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray |
PeriodIndex | ndarray[object] | PeriodArray |
IntervalIndex | IntervalArray | IntervalArray |
See Also
--------
values : Values
"""
return self._data
def _get_engine_target(self) -> ArrayLike:
"""
Get the ndarray or ExtensionArray that we can pass to the IndexEngine
constructor.
"""
vals = self._values
if isinstance(vals, StringArray):
# GH#45652 much more performant than ExtensionEngine
return vals._ndarray
if (
type(self) is Index
and isinstance(self._values, ExtensionArray)
and not isinstance(self._values, BaseMaskedArray)
and not (
isinstance(self._values, ArrowExtensionArray)
and is_numeric_dtype(self.dtype)
# Exclude decimal
and self.dtype.kind != "O"
)
):
# TODO(ExtensionIndex): remove special-case, just use self._values
return self._values.astype(object)
return vals
def _get_join_target(self) -> ArrayLike:
"""
Get the ndarray or ExtensionArray that we can pass to the join
functions.
"""
if isinstance(self._values, BaseMaskedArray):
# This is only used if our array is monotonic, so no NAs present
return self._values._data
elif isinstance(self._values, ArrowExtensionArray):
# This is only used if our array is monotonic, so no missing values
# present
return self._values.to_numpy()
return self._get_engine_target()
def _from_join_target(self, result: np.ndarray) -> ArrayLike:
"""
Cast the ndarray returned from one of the libjoin.foo_indexer functions
back to type(self)._data.
"""
if isinstance(self.values, BaseMaskedArray):
return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_))
elif isinstance(self.values, ArrowExtensionArray):
return type(self.values)._from_sequence(result)
return result
def memory_usage(self, deep: bool = False) -> int:
result = self._memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
def where(self, cond, other=None) -> Index:
"""
Replace values where the condition is False.
The replacement is taken from other.
Parameters
----------
cond : bool array-like with the same length as self
Condition to select the values on.
other : scalar, or array-like, default None
Replacement if the condition is False.
Returns
-------
pandas.Index
A copy of self with values replaced from other
where the condition is False.
See Also
--------
Series.where : Same method for Series.
DataFrame.where : Same method for DataFrame.
Examples
--------
>>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.where(idx.isin(['car', 'train']), 'other')
Index(['car', 'other', 'train', 'other'], dtype='object')
"""
if isinstance(self, ABCMultiIndex):
raise NotImplementedError(
".where is not supported for MultiIndex operations"
)
cond = np.asarray(cond, dtype=bool)
return self.putmask(~cond, other)
# construction helpers
@classmethod
def _raise_scalar_data_error(cls, data):
# We return the TypeError so that we can raise it from the constructor
# in order to keep mypy happy
raise TypeError(
f"{cls.__name__}(...) must be called with a collection of some "
f"kind, {repr(data)} was passed"
)
def _validate_fill_value(self, value):
"""
Check if the value can be inserted into our array without casting,
and convert it to an appropriate native type if necessary.
Raises
------
TypeError
If the value cannot be inserted into an array of this dtype.
"""
dtype = self.dtype
if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]:
try:
return np_can_hold_element(dtype, value)
except LossySetitemError as err:
# re-raise as TypeError for consistency
raise TypeError from err
elif not can_hold_element(self._values, value):
raise TypeError
return value
def _require_scalar(self, value):
"""
Check that this is a scalar value that we can use for setitem-like
operations without changing dtype.
"""
if not is_scalar(value):
raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}")
return value
def _is_memory_usage_qualified(self) -> bool:
"""
Return a boolean if we need a qualified .info display.
"""
return is_object_dtype(self.dtype)
def __contains__(self, key: Any) -> bool:
"""
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
"""
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
__hash__: ClassVar[None] # type: ignore[assignment]
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
getitem = self._data.__getitem__
if is_integer(key) or is_float(key):
# GH#44051 exclude bool, which would return a 2d ndarray
key = com.cast_scalar_indexer(key)
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid the
# pessimization of com.is_bool_indexer and ndim checks.
result = getitem(key)
# Going through simple_new for performance.
return type(self)._simple_new(
result, name=self._name, refs=self._references
)
if com.is_bool_indexer(key):
# if we have list[bools, length=1e5] then doing this check+convert
# takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
# time below from 3.8 ms to 496 µs
# if we already have ndarray[bool], the overhead is 1.4 µs or .25%
if is_extension_array_dtype(getattr(key, "dtype", None)):
key = key.to_numpy(dtype=bool, na_value=False)
else:
key = np.asarray(key, dtype=bool)
result = getitem(key)
# Because we ruled out integer above, we always get an arraylike here
if result.ndim > 1:
disallow_ndim_indexing(result)
# NB: Using _constructor._simple_new would break if MultiIndex
# didn't override __getitem__
return self._constructor._simple_new(result, name=self._name)
def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._data[slobj]
return type(self)._simple_new(res, name=self._name, refs=self._references)
def _can_hold_identifiers_and_holds_name(self, name) -> bool:
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if (
is_object_dtype(self.dtype)
or is_string_dtype(self.dtype)
or is_categorical_dtype(self.dtype)
):
return name in self
return False
def append(self, other: Index | Sequence[Index]) -> Index:
"""
Append a collection of Index options together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
Index
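Examples
--------
Appending a single Index to an existing one:
>>> idx = pd.Index([1, 2, 3])
>>> idx.append(pd.Index([4]))
Index([1, 2, 3, 4], dtype='int64')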
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat += list(other)
else:
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[Index, Sequence[Index]]"; expected "Index"
to_concat.append(other) # type: ignore[arg-type]
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError("all inputs must be Index")
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
"""
Concatenate multiple Index objects.
"""
to_concat_vals = [x._values for x in to_concat]
result = concat_compat(to_concat_vals)
return Index._with_infer(result, name=name)
def putmask(self, mask, value) -> Index:
"""
Return a new Index of the values set with the mask.
Returns
-------
Index
See Also
--------
numpy.ndarray.putmask : Changes elements of an array
based on conditional and input values.
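Examples
--------
Values from ``value`` replace the masked positions:
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([5, 6, 7])
>>> idx1.putmask([True, False, False], idx2)
Index([5, 2, 3], dtype='int64')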
"""
mask, noop = validate_putmask(self._values, mask)
if noop:
return self.copy()
if self.dtype != object and is_valid_na_for_dtype(value, self.dtype):
# e.g. None -> np.nan, see also Block._standardize_fill_value
value = self._na_value
try:
converted = self._validate_fill_value(value)
except (LossySetitemError, ValueError, TypeError) as err:
if is_object_dtype(self): # pragma: no cover
raise err
# See also: Block.coerce_to_target_dtype
dtype = self._find_common_type_compat(value)
return self.astype(dtype).putmask(mask, value)
values = self._values.copy()
if isinstance(values, np.ndarray):
converted = setitem_datetimelike_compat(values, mask.sum(), converted)
np.putmask(values, mask, converted)
else:
# Note: we use the original value here, not converted, as
# _validate_fill_value is not idempotent
values._putmask(mask, value)
return self._shallow_copy(values)
def equals(self, other: Any) -> bool:
"""
Determine if two Index object are equal.
The things that are being compared are:
* The elements inside the Index object.
* The order of the elements inside the Index object.
Parameters
----------
other : Any
The other object to compare against.
Returns
-------
bool
True if "other" is an Index and it has the same elements and order
as the calling index; False otherwise.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx1
Index([1, 2, 3], dtype='int64')
>>> idx1.equals(pd.Index([1, 2, 3]))
True
The elements inside are compared
>>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2
Index(['1', '2', '3'], dtype='object')
>>> idx1.equals(idx2)
False
The order is compared
>>> ascending_idx = pd.Index([1, 2, 3])
>>> ascending_idx
Index([1, 2, 3], dtype='int64')
>>> descending_idx = pd.Index([3, 2, 1])
>>> descending_idx
Index([3, 2, 1], dtype='int64')
>>> ascending_idx.equals(descending_idx)
False
The dtype is *not* compared
>>> int64_idx = pd.Index([1, 2, 3], dtype='int64')
>>> int64_idx
Index([1, 2, 3], dtype='int64')
>>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64')
>>> uint64_idx
Index([1, 2, 3], dtype='uint64')
>>> int64_idx.equals(uint64_idx)
True
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):
# if other is not object, use other's logic for coercion
return other.equals(self)
if isinstance(other, ABCMultiIndex):
# d-level MultiIndex can equal d-tuple Index
return other.equals(self)
if isinstance(self._values, ExtensionArray):
# Dispatch to the ExtensionArray's .equals method.
if not isinstance(other, type(self)):
return False
earr = cast(ExtensionArray, self._data)
return earr.equals(other._data)
if is_extension_array_dtype(other.dtype):
# All EA-backed Index subclasses override equals
return other.equals(self)
return array_equivalent(self._values, other._values)
def identical(self, other) -> bool:
"""
Similar to equals, but checks that object attributes and types are also equal.
Returns
-------
bool
True if two Index objects have equal elements and the same type,
otherwise False.
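Examples
--------
Indexes that differ only in attributes such as ``name`` are equal
but not identical:
>>> idx1 = pd.Index(['1', '2', '3'])
>>> idx2 = pd.Index(['1', '2', '3'])
>>> idx2.identical(idx1)
True
>>> idx1 = pd.Index(['1', '2', '3'], name="A")
>>> idx2 = pd.Index(['1', '2', '3'], name="B")
>>> idx2.identical(idx1)
False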
"""
return (
self.equals(other)
and all(
getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables
)
and type(self) == type(other)
and self.dtype == other.dtype
)
def asof(self, label):
"""
Return the label from the index, or, if not present, the previous one.
Assuming that the index is sorted, return the passed index label if it
is in the index, or return the previous index label if the passed one
is not in the index.
Parameters
----------
label : object
The label up to which the method returns the latest index label.
Returns
-------
object
The passed label if it is in the index. The previous label if the
passed label is not in the sorted index or `NaN` if there is no
such label.
See Also
--------
Series.asof : Return the latest value in a Series up to the
passed index.
merge_asof : Perform an asof merge (similar to left join but it
matches on nearest key rather than equal key).
Index.get_loc : An `asof` is a thin wrapper around `get_loc`
with method='pad'.
Examples
--------
`Index.asof` returns the latest index label up to the passed label.
>>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])
>>> idx.asof('2014-01-01')
'2013-12-31'
If the label is in the index, the method returns the passed label.
>>> idx.asof('2014-01-02')
'2014-01-02'
If all of the labels in the index are later than the passed label,
NaN is returned.
>>> idx.asof('1999-01-02')
nan
If the index is not sorted, an error is raised.
>>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',
... '2014-01-03'])
>>> idx_not_sorted.asof('2013-12-31')
Traceback (most recent call last):
ValueError: index must be monotonic increasing or decreasing
"""
self._searchsorted_monotonic(label) # validate sortedness
try:
loc = self.get_loc(label)
except (KeyError, TypeError):
# KeyError -> No exact match, try for padded
# TypeError -> passed e.g. non-hashable, fall through to get
# the tested exception message
indexer = self.get_indexer([label], method="pad")
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("asof requires scalar valued input")
loc = indexer.item()
if loc == -1:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(
self, where: Index, mask: npt.NDArray[np.bool_]
) -> npt.NDArray[np.intp]:
"""
Return the locations (indices) of labels in the index.
As in the `asof` function, if the label (a particular entry in
`where`) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in `where`,
-1 is returned.
`mask` is used to ignore NA values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : np.ndarray[bool]
Array of booleans denoting where values in the original
data are not NA.
Returns
-------
np.ndarray[np.intp]
An array of locations (indices) of the labels from the Index
which correspond to the return values of the `asof` function
for every element in `where`.
"""
# error: No overload variant of "searchsorted" of "ndarray" matches argument
# types "Union[ExtensionArray, ndarray[Any, Any]]", "str"
# TODO: will be fixed when ExtensionArray.searchsorted() is fixed
locs = self._values[mask].searchsorted(
where._values, side="right" # type: ignore[call-overload]
)
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self), dtype=np.intp)[mask].take(locs)
first_value = self._values[mask.argmax()]
result[(locs == 0) & (where._values < first_value)] = -1
return result
def sort_values(
self,
return_indexer: bool = False,
ascending: bool = True,
na_position: str_t = "last",
key: Callable | None = None,
):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
.. versionadded:: 1.2.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape.
.. versionadded:: 1.1.0
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
Series.sort_values : Sort values of a Series.
DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
idx = ensure_key_mapped(self, key)
# GH 35584. Sort missing values according to na_position kwarg
# ignore na_position for MultiIndex
if not isinstance(self, ABCMultiIndex):
_as = nargsort(
items=idx, ascending=ascending, na_position=na_position, key=key
)
else:
_as = idx.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
"""
Use sort_values instead.
"""
raise TypeError("cannot sort an Index object in-place, use sort_values instead")
def shift(self, periods: int = 1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or str, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
Shifted index.
See Also
--------
Series.shift : Shift values of Series.
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
"""
raise NotImplementedError(
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(self).__name__}"
)
def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
np.ndarray[np.intp]
Integer indices that would sort the index if used as
an indexer.
See Also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
# This works for either ndarray or EA, is overridden
# by RangeIndex, MultiIndex
return self._data.argsort(*args, **kwargs)
def _check_indexing_error(self, key):
if not is_scalar(key):
# if key is not a scalar, directly raise an error (the code below
# would convert to numpy arrays and raise later any way) - GH29926
raise InvalidIndexError(key)
def _should_fallback_to_positional(self) -> bool:
"""
Should an integer key be treated as positional?
"""
return self.inferred_type not in {
"integer",
"mixed-integer",
"floating",
"complex",
}
_index_shared_docs[
"get_indexer_non_unique"
] = """
Compute indexer and mask for new index given the current index.
The indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : np.ndarray[np.intp]
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array.
Examples
--------
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['b', 'b'])
(array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
In the example below there are no matched values.
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['q', 'r', 't'])
(array([-1, -1, -1]), array([0, 1, 2]))
For this reason, the returned ``indexer`` contains only integers equal to -1.
It demonstrates that there's no match between the index and the ``target``
values at these positions. The mask [0, 1, 2] in the return value shows that
the first, second, and third elements are missing.
Notice that the return value is a tuple containing two items. In the example
below the first item is an array of locations in ``index``. The second
item is a mask showing that the first and third elements are missing.
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['f', 'b', 's'])
(array([-1, 1, 3, 4, -1]), array([0, 2]))
"""
def get_indexer_non_unique(
self, target
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
target = ensure_index(target)
target = self._maybe_cast_listlike_indexer(target)
if not self._should_compare(target) and not self._should_partial_index(target):
# _should_partial_index e.g. IntervalIndex with numeric scalars
# that can be matched to Interval scalars.
return self._get_indexer_non_comparable(target, method=None, unique=False)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if not is_dtype_equal(self.dtype, target.dtype):
# TODO: if object, could use infer_dtype to preempt costly
# conversion if still non-comparable?
dtype = self._find_common_type_compat(target)
this = self.astype(dtype, copy=False)
that = target.astype(dtype, copy=False)
return this.get_indexer_non_unique(that)
# TODO: get_indexer has fastpaths for both Categorical-self and
# Categorical-target. Can we do something similar here?
# Note: _maybe_promote ensures we never get here with MultiIndex
# self and non-Multi target
tgt_values = target._get_engine_target()
if self._is_multi and target._is_multi:
engine = self._engine
# Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has
# no attribute "_extract_level_codes"
tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr]
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return ensure_platform_int(indexer), ensure_platform_int(missing)
def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
"""
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Returns
-------
np.ndarray[np.intp]
List of indices.
Examples
--------
>>> idx = pd.Index([np.nan, 'var1', np.nan])
>>> idx.get_indexer_for([np.nan])
array([0, 2])
"""
if self._index_as_unique:
return self.get_indexer(target)
indexer, _ = self.get_indexer_non_unique(target)
return indexer
def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:
"""
Analogue to get_indexer that raises if any elements are missing.
"""
keyarr = key
if not isinstance(keyarr, Index):
keyarr = com.asarray_tuplesafe(keyarr)
if self._index_as_unique:
indexer = self.get_indexer_for(keyarr)
keyarr = self.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)
self._raise_if_missing(keyarr, indexer, axis_name)
keyarr = self.take(indexer)
if isinstance(key, Index):
# GH 42790 - Preserve name from an Index
keyarr.name = key.name
if keyarr.dtype.kind in ["m", "M"]:
# DTI/TDI.take can infer a freq in some cases when we don't want one
if isinstance(key, list) or (
isinstance(key, type(self))
# "Index" has no attribute "freq"
and key.freq is None # type: ignore[attr-defined]
):
keyarr = keyarr._with_freq(None)
return keyarr, indexer
def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
"""
Check that indexer can be used to return a result.
e.g. at least one element was found,
unless the list of keys was actually empty.
Parameters
----------
key : list-like
Targeted labels (only used to show correct error message).
indexer : array-like of integers
Indices corresponding to the key,
(with -1 indicating not found).
axis_name : str
Raises
------
KeyError
If at least one key was requested but none was found.
"""
if len(key) == 0:
return
# Count missing values
missing_mask = indexer < 0
nmissing = missing_mask.sum()
if nmissing:
# TODO: remove special-case; this is just to keep exception
# message tests from raising while debugging
use_interval_msg = is_interval_dtype(self.dtype) or (
is_categorical_dtype(self.dtype)
# "Index" has no attribute "categories" [attr-defined]
and is_interval_dtype(
self.categories.dtype # type: ignore[attr-defined]
)
)
if nmissing == len(indexer):
if use_interval_msg:
key = list(key)
raise KeyError(f"None of [{key}] are in the [{axis_name}]")
not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
raise KeyError(f"{not_found} not in index")
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: Literal[True] = ...
) -> npt.NDArray[np.intp]:
...
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: Literal[False]
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
...
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: bool = True
) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
...
def _get_indexer_non_comparable(
self, target: Index, method, unique: bool = True
) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
"""
Called from get_indexer or get_indexer_non_unique when the target
is of a non-comparable dtype.
For get_indexer lookups with method=None, get_indexer is an _equality_
check, so non-comparable dtypes mean we will always have no matches.
For get_indexer lookups with a method, get_indexer is an _inequality_
check, so non-comparable dtypes mean we will always raise TypeError.
Parameters
----------
target : Index
method : str or None
unique : bool, default True
* True if called from get_indexer.
* False if called from get_indexer_non_unique.
Raises
------
TypeError
If doing an inequality check, i.e. method is not None.
"""
if method is not None:
other = _unpack_nested_dtype(target)
raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}")
no_matches = -1 * np.ones(target.shape, dtype=np.intp)
if unique:
# This is for get_indexer
return no_matches
else:
# This is for get_indexer_non_unique
missing = np.arange(len(target), dtype=np.intp)
return no_matches, missing
@property
def _index_as_unique(self) -> bool:
"""
Whether we should treat this as unique for the sake of
get_indexer vs get_indexer_non_unique.
For IntervalIndex compat.
"""
return self.is_unique
_requires_unique_msg = "Reindexing only valid with uniquely valued Index objects"
def _maybe_promote(self, other: Index) -> tuple[Index, Index]:
"""
When dealing with an object-dtype Index and a non-object Index, see
if we can upcast the object-dtype one to improve performance.
"""
if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
if (
self.tz is not None
and other.tz is not None
and not tz_compare(self.tz, other.tz)
):
# standardize on UTC
return self.tz_convert("UTC"), other.tz_convert("UTC")
elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex):
try:
return type(other)(self), other
except OutOfBoundsDatetime:
return self, other
elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex):
# TODO: we dont have tests that get here
return type(other)(self), other
elif self.dtype.kind == "u" and other.dtype.kind == "i":
# GH#41873
if other.min() >= 0:
# lookup min as it may be cached
# TODO: may need itemsize check if we have non-64-bit Indexes
return self, other.astype(self.dtype)
elif self._is_multi and not other._is_multi:
try:
# "Type[Index]" has no attribute "from_tuples"
other = type(self).from_tuples(other) # type: ignore[attr-defined]
except (TypeError, ValueError):
# let's instead try with a straight Index
self = Index(self._values)
if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):
# Reverse op so we don't need to re-implement on the subclasses
other, self = other._maybe_promote(self)
return self, other
def _find_common_type_compat(self, target) -> DtypeObj:
"""
Implementation of find_common_type that adjusts for Index-specific
special cases.
"""
target_dtype, _ = infer_dtype_from(target, pandas_dtype=True)
# special case: if one dtype is uint64 and the other a signed int, return object
# See https://github.com/pandas-dev/pandas/issues/26778 for discussion
# Now it's:
# * float | [u]int -> float
# * uint64 | signed int -> object
# We may change union(float | [u]int) to go to object.
if self.dtype == "uint64" or target_dtype == "uint64":
if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(
target_dtype
):
return _dtype_obj
dtype = find_result_type(self._values, target)
dtype = common_dtype_categorical_compat([self, target], dtype)
return dtype
def _should_compare(self, other: Index) -> bool:
"""
Check if `self == other` can ever have non-False entries.
"""
if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or (
is_bool_dtype(self) and is_any_real_numeric_dtype(other)
):
# GH#16877 Treat boolean labels passed to a numeric index as not
# found. Without this fix False and True would be treated as 0 and 1
# respectively.
return False
other = _unpack_nested_dtype(other)
dtype = other.dtype
return self._is_comparable_dtype(dtype) or is_object_dtype(dtype)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if self.dtype.kind == "b":
return dtype.kind == "b"
elif is_numeric_dtype(self.dtype):
return is_numeric_dtype(dtype)
# TODO: this was written assuming we only get here with object-dtype,
# which is no longer correct. Can we specialize for EA?
return True
def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]:
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
# than converting to tuples
if isinstance(values, ABCMultiIndex):
values = values._values
values = Categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in result.items()}
return PrettyDict(result)
def map(self, mapper, na_action=None):
"""
Map values using an input mapping or function.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
Union[Index, MultiIndex]
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
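Examples
--------
Mapping with a dict:
>>> idx = pd.Index([1, 2, 3])
>>> idx.map({1: 'a', 2: 'b', 3: 'c'})
Index(['a', 'b', 'c'], dtype='object')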
"""
from pandas.core.indexes.multi import MultiIndex
new_values = self._map_values(mapper, na_action=na_action)
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif self.name:
names = [self.name] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values, names=names)
dtype = None
if not new_values.size:
# empty
dtype = self.dtype
# e.g. if we are floating and new_values is all ints, then we
# don't want to cast back to floating. But if we are UInt64
# and new_values is all ints, we want to try.
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type
if same_dtype:
new_values = maybe_cast_pointwise_result(
new_values, self.dtype, same_dtype=same_dtype
)
return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name)
# TODO: De-duplicate with map, xref GH#32349
def _transform_index(self, func, *, level=None) -> Index:
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(self, ABCMultiIndex):
values = [
self.get_level_values(i).map(func)
if i == level or level is None
else self.get_level_values(i)
for i in range(self.nlevels)
]
return type(self).from_arrays(values)
else:
items = [func(x) for x in self]
return Index(items, name=self.name, tupleize_cols=False)
def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
np.ndarray[bool]
NumPy array of boolean values.
See Also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Index([1, 2, 3], dtype='int64')
Check whether each index value is in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex([(1, 'red'),
(2, 'blue'),
(3, 'green')],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self._values, values)
def _get_string_slice(self, key: str_t):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(
self,
start: Hashable | None = None,
end: Hashable | None = None,
step: int | None = None,
) -> slice:
"""
Compute the slice indexer for input labels and step.
Index needs to be ordered and unique.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, default None
Returns
-------
slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril
Examples
--------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3, None)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3, None)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index, then try to cast
to an int if equivalent.
"""
return key
def _maybe_cast_listlike_indexer(self, target) -> Index:
"""
Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
"""
return ensure_index(target)
def _validate_indexer(self, form: str_t, key, kind: str_t) -> None:
"""
If we are a positional indexer, validate that we have appropriately
typed bounds; they must be integers.
"""
assert kind in ["getitem", "iloc"]
if key is not None and not is_integer(key):
self._raise_invalid_indexer(form, key)
def _maybe_cast_slice_bound(self, label, side: str_t):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. datetimelike Indexes
if is_numeric_dtype(self.dtype):
return self._maybe_cast_indexer(label)
# reject them, if index does not contain label
if (is_float(label) or is_integer(label)) and label not in self:
self._raise_invalid_indexer("slice", label)
return label
def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(
label, side="right" if side == "left" else "left"
)
return len(self) - pos
raise ValueError("index must be monotonic increasing or decreasing")
def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
int
Index of label.
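Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.get_slice_bound('b', side='left')
1
>>> idx.get_slice_bound('b', side='right')
2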
"""
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg, must be either "
f"'left' or 'right': {side}"
)
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array, which
# is OK as long as they are representable by a slice.
assert is_bool_dtype(slc.dtype)
slc = lib.maybe_booleans_to_slice(slc.view("u1"))
if isinstance(slc, np.ndarray):
raise KeyError(
f"Cannot get {side} slice bound for non-unique "
f"label: {repr(original_label)}"
)
if isinstance(slc, slice):
if side == "left":
return slc.start
else:
return slc.stop
else:
if side == "right":
return slc + 1
else:
return slc
def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]:
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, defaults None
If None, defaults to 1.
Returns
-------
tuple[int, int]
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
--------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
"""
inc = step is None or step >= 0
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
# GH 16785: If start and end happen to be date strings with UTC offsets
# attempt to parse and check that the offsets are the same
if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):
try:
ts_start = Timestamp(start)
ts_end = Timestamp(end)
except (ValueError, TypeError):
pass
else:
if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
raise ValueError("Both dates must have the same UTC offset")
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, "left")
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, "right")
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
def delete(self: _IndexT, loc) -> _IndexT:
"""
Make new Index with passed location(-s) deleted.
Parameters
----------
loc : int or list of int
Location of item(-s) which will be deleted.
Use a list of locations to delete more than one value at the same time.
Returns
-------
Index
Will be same type as self, except for RangeIndex.
See Also
--------
numpy.delete : Delete any rows and column from NumPy array (ndarray).
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete(1)
Index(['a', 'c'], dtype='object')
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete([0, 2])
Index(['b'], dtype='object')
"""
values = self._values
res_values: ArrayLike
if isinstance(values, np.ndarray):
# TODO(__array_function__): special casing will be unnecessary
res_values = np.delete(values, loc)
else:
res_values = values.delete(loc)
# _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(res_values, name=self.name)
def insert(self, loc: int, item) -> Index:
"""
Make new Index inserting new item at location.
Follows ``numpy.insert`` semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
Index
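Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.insert(1, 'x')
Index(['a', 'x', 'b', 'c'], dtype='object')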
"""
item = lib.item_from_zerodim(item)
if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object:
item = self._na_value
arr = self._values
try:
if isinstance(arr, ExtensionArray):
res_values = arr.insert(loc, item)
return type(self)._simple_new(res_values, name=self.name)
else:
item = self._validate_fill_value(item)
except (TypeError, ValueError, LossySetitemError):
# e.g. trying to insert an integer into a DatetimeIndex
# We cannot keep the same dtype, so cast to the (often object)
# minimal shared dtype before doing the insert.
dtype = self._find_common_type_compat(item)
return self.astype(dtype).insert(loc, item)
if arr.dtype != object or not isinstance(
item, (tuple, np.datetime64, np.timedelta64)
):
# with object-dtype we need to worry about numpy incorrectly casting
# dt64/td64 to integer, also about treating tuples as sequences
# special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550
casted = arr.dtype.type(item)
new_values = np.insert(arr, loc, casted)
else:
# error: No overload variant of "insert" matches argument types
# "ndarray[Any, Any]", "int", "None"
new_values = np.insert(arr, loc, None) # type: ignore[call-overload]
loc = loc if loc >= 0 else loc - 1
new_values[loc] = item
return Index._with_infer(new_values, name=self.name)
def drop(
self,
labels: Index | np.ndarray | Iterable[Hashable],
errors: IgnoreRaise = "raise",
) -> Index:
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like or scalar
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
Index
Will be same type as self, except for RangeIndex.
Raises
------
KeyError
If not all of the labels are found in the selected axis
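Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.drop(['a'])
Index(['b', 'c'], dtype='object')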
"""
if not isinstance(labels, Index):
# avoid materializing e.g. RangeIndex
arr_dtype = "object" if self.dtype == "object" else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer_for(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError(f"{list(labels[mask])} not found in axis")
indexer = indexer[~mask]
return self.delete(indexer)
def infer_objects(self, copy: bool = True) -> Index:
"""
If we have an object dtype, try to infer a non-object dtype.
Parameters
----------
copy : bool, default True
Whether to make a copy in cases where no inference occurs.
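Examples
--------
>>> idx = pd.Index([1, 2, 3], dtype="object")
>>> idx
Index([1, 2, 3], dtype='object')
>>> idx.infer_objects()
Index([1, 2, 3], dtype='int64')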
"""
if self._is_multi:
raise NotImplementedError(
"infer_objects is not implemented for MultiIndex. "
"Use index.to_frame().infer_objects() instead."
)
if self.dtype != object:
return self.copy() if copy else self
values = self._values
values = cast("npt.NDArray[np.object_]", values)
res_values = lib.maybe_convert_objects(
values,
convert_datetime=True,
convert_timedelta=True,
convert_period=True,
convert_interval=True,
)
if copy and res_values is values:
return self.copy()
result = Index(res_values, name=self.name)
if not copy and res_values is values and self._references is not None:
result._references = self._references
result._references.add_index_reference(result)
return result
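# Illustrative usage of Index.infer_objects (a minimal sketch; exact repr may vary by version):
# >>> pd.Index([1, 2, 3], dtype=object).infer_objects()
# Index([1, 2, 3], dtype='int64')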
# --------------------------------------------------------------------
# Generated Arithmetic, Comparison, and Unary Methods
def _cmp_method(self, other, op):
"""
Wrapper used to dispatch comparison operations.
"""
if self.is_(other):
# fastpath
if op in {operator.eq, operator.le, operator.ge}:
arr = np.ones(len(self), dtype=bool)
if self._can_hold_na and not isinstance(self, ABCMultiIndex):
# TODO: should set MultiIndex._can_hold_na = False?
arr[self.isna()] = False
return arr
elif op is operator.ne:
arr = np.zeros(len(self), dtype=bool)
if self._can_hold_na and not isinstance(self, ABCMultiIndex):
arr[self.isna()] = True
return arr
if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(
self
) != len(other):
raise ValueError("Lengths must match to compare")
if not isinstance(other, ABCMultiIndex):
other = extract_array(other, extract_numpy=True)
else:
other = np.asarray(other)
if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):
# e.g. PeriodArray, Categorical
with np.errstate(all="ignore"):
result = op(self._values, other)
elif isinstance(self._values, ExtensionArray):
result = op(self._values, other)
elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex):
# don't pass MultiIndex
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)
else:
with np.errstate(all="ignore"):
result = ops.comparison_op(self._values, other, op)
return result
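# Minimal sketch of the comparison dispatch above (values illustrative, not part of the
# original source): comparisons are elementwise and mismatched lengths raise rather than
# broadcast:
# >>> pd.Index([1, 2, 3]) == pd.Index([1, 5, 3])
# array([ True, False,  True])
# >>> pd.Index([1, 2]) == pd.Index([1, 2, 3])
# ValueError: Lengths must match to compare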
def _logical_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
res_values = ops.logical_op(lvalues, rvalues, op)
return self._construct_result(res_values, name=res_name)
def _construct_result(self, result, name):
if isinstance(result, tuple):
return (
Index(result[0], name=name, dtype=result[0].dtype),
Index(result[1], name=name, dtype=result[1].dtype),
)
return Index(result, name=name, dtype=result.dtype)
def _arith_method(self, other, op):
if (
isinstance(other, Index)
and is_object_dtype(other.dtype)
and type(other) is not Index
):
# We return NotImplemented for object-dtype index *subclasses* so they have
# a chance to implement ops before we unwrap them.
# See https://github.com/pandas-dev/pandas/issues/31109
return NotImplemented
return super()._arith_method(other, op)
def _unary_method(self, op):
result = op(self._values)
return Index(result, name=self.name)
def __abs__(self) -> Index:
return self._unary_method(operator.abs)
def __neg__(self) -> Index:
return self._unary_method(operator.neg)
def __pos__(self) -> Index:
return self._unary_method(operator.pos)
def __invert__(self) -> Index:
# GH#8875
return self._unary_method(operator.inv)
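# Minimal sketch of the unary wrappers above (values illustrative, not part of the original source):
# >>> -pd.Index([1, -2, 3])
# Index([-1, 2, -3], dtype='int64')
# >>> abs(pd.Index([1, -2, 3]))
# Index([1, 2, 3], dtype='int64')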
# --------------------------------------------------------------------
# Reductions
def any(self, *args, **kwargs):
"""
Return whether any element is Truthy.
Parameters
----------
*args
Required for compatibility with numpy.
**kwargs
Required for compatibility with numpy.
Returns
-------
bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
--------
Index.all : Return whether all elements are True.
Series.all : Return whether all elements are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
>>> index = pd.Index([0, 1, 2])
>>> index.any()
True
>>> index = pd.Index([0, 0, 0])
>>> index.any()
False
"""
nv.validate_any(args, kwargs)
self._maybe_disable_logical_methods("any")
# error: Argument 1 to "any" has incompatible type "ArrayLike"; expected
# "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int,
# float, complex, str, bytes, generic]], Sequence[Sequence[Any]],
# _SupportsArray]"
return np.any(self.values) # type: ignore[arg-type]
def all(self, *args, **kwargs):
"""
Return whether all elements are Truthy.
Parameters
----------
*args
Required for compatibility with numpy.
**kwargs
Required for compatibility with numpy.
Returns
-------
bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
--------
Index.any : Return whether any element in an Index is True.
Series.any : Return whether any element in a Series is True.
Series.all : Return whether all elements in a Series are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
True, because nonzero integers are considered True.
>>> pd.Index([1, 2, 3]).all()
True
False, because ``0`` is considered False.
>>> pd.Index([0, 1, 2]).all()
False
"""
nv.validate_all(args, kwargs)
self._maybe_disable_logical_methods("all")
# error: Argument 1 to "all" has incompatible type "ArrayLike"; expected
# "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int,
# float, complex, str, bytes, generic]], Sequence[Sequence[Any]],
# _SupportsArray]"
return np.all(self.values) # type: ignore[arg-type]
def _maybe_disable_logical_methods(self, opname: str_t) -> None:
"""
raise if this Index subclass does not support any or all.
"""
if (
isinstance(self, ABCMultiIndex)
or needs_i8_conversion(self.dtype)
or is_interval_dtype(self.dtype)
or is_categorical_dtype(self.dtype)
or is_float_dtype(self.dtype)
):
# This call will raise
make_invalid_op(opname)(self)
def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return -1
return super().argmin(skipna=skipna)
def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return -1
return super().argmax(skipna=skipna)
def min(self, axis=None, skipna: bool = True, *args, **kwargs):
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
if len(self) and self.is_monotonic_increasing:
# quick check
first = self[0]
if not isna(first):
return first
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return self._na_value
if not self._is_multi and not isinstance(self._values, np.ndarray):
return self._values._reduce(name="min", skipna=skipna)
return super().min(skipna=skipna)
def max(self, axis=None, skipna: bool = True, *args, **kwargs):
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
if len(self) and self.is_monotonic_increasing:
# quick check
last = self[-1]
if not isna(last):
return last
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return self._na_value
if not self._is_multi and not isinstance(self._values, np.ndarray):
return self._values._reduce(name="max", skipna=skipna)
return super().max(skipna=skipna)
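# Minimal sketch of min/max behavior (values illustrative, not part of the original source):
# a monotonic Index takes the first/last-element fast path, and an empty Index returns the
# NA value for its dtype:
# >>> pd.Index([3, 1, 2]).min()
# 1
# >>> pd.Index([], dtype='float64').max()
# nan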
# --------------------------------------------------------------------
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
"""
# See GH#27775, GH#27384 for history/reasoning in how this is defined.
return (len(self),)
class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
"""
Type for categorical data with the categories and orderedness.
Parameters
----------
categories : sequence, optional
Must be unique, and must not contain any nulls.
The categories are stored in an Index,
and if an index is provided the dtype of that index will be used.
ordered : bool or None, default False
Whether or not this categorical is treated as an ordered categorical.
None can be used to maintain the ordered value of existing categoricals when
used in operations that combine categoricals, e.g. astype, and will resolve to
False if there is no existing ordered to maintain.
Attributes
----------
categories
ordered
Methods
-------
None
See Also
--------
Categorical : Represent a categorical variable in classic R / S-plus fashion.
Notes
-----
This class is useful for specifying the type of a ``Categorical``
independent of the values. See :ref:`categorical.categoricaldtype`
for more.
Examples
--------
>>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
0 a
1 b
2 a
3 NaN
dtype: category
Categories (2, object): ['b' < 'a']
An empty CategoricalDtype with a specific dtype can be created
by providing an empty index. As follows,
>>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
dtype('<M8[ns]')
"""
# TODO: Document public vs. private API
name = "category"
type: type[CategoricalDtypeType] = CategoricalDtypeType
kind: str_type = "O"
str = "|O08"
base = np.dtype("O")
_metadata = ("categories", "ordered")
_cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
def __init__(self, categories=None, ordered: Ordered = False) -> None:
self._finalize(categories, ordered, fastpath=False)
def _from_fastpath(
cls, categories=None, ordered: bool | None = None
) -> CategoricalDtype:
self = cls.__new__(cls)
self._finalize(categories, ordered, fastpath=True)
return self
def _from_categorical_dtype(
cls, dtype: CategoricalDtype, categories=None, ordered: Ordered = None
) -> CategoricalDtype:
if categories is ordered is None:
return dtype
if categories is None:
categories = dtype.categories
if ordered is None:
ordered = dtype.ordered
return cls(categories, ordered)
def _from_values_or_dtype(
cls,
values=None,
categories=None,
ordered: bool | None = None,
dtype: Dtype | None = None,
) -> CategoricalDtype:
"""
Construct dtype from the input parameters used in :class:`Categorical`.
This constructor method specifically does not do the factorization
step, if that is needed to find the categories. This constructor may
therefore return ``CategoricalDtype(categories=None, ordered=None)``,
which may not be useful. Additional steps may therefore have to be
taken to create the final dtype.
The return dtype is specified from the inputs in this prioritized
order:
1. if dtype is a CategoricalDtype, return dtype
2. if dtype is the string 'category', create a CategoricalDtype from
the supplied categories and ordered parameters, and return that.
3. if values is a categorical, use value.dtype, but override it with
categories and ordered if either/both of those are not None.
4. if dtype is None and values is not a categorical, construct the
dtype from categories and ordered, even if either of those is None.
Parameters
----------
values : list-like, optional
The list-like must be 1-dimensional.
categories : list-like, optional
Categories for the CategoricalDtype.
ordered : bool, optional
Designating if the categories are ordered.
dtype : CategoricalDtype or the string "category", optional
If ``CategoricalDtype``, cannot be used together with
`categories` or `ordered`.
Returns
-------
CategoricalDtype
Examples
--------
>>> pd.CategoricalDtype._from_values_or_dtype()
CategoricalDtype(categories=None, ordered=None)
>>> pd.CategoricalDtype._from_values_or_dtype(
... categories=['a', 'b'], ordered=True
... )
CategoricalDtype(categories=['a', 'b'], ordered=True)
>>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
>>> c = pd.Categorical([0, 1], dtype=dtype1, fastpath=True)
>>> pd.CategoricalDtype._from_values_or_dtype(
... c, ['x', 'y'], ordered=True, dtype=dtype2
... )
Traceback (most recent call last):
...
ValueError: Cannot specify `categories` or `ordered` together with
`dtype`.
The supplied dtype takes precedence over values' dtype:
>>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
CategoricalDtype(categories=['x', 'y'], ordered=False)
"""
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, str):
if dtype == "category":
if ordered is None and cls.is_dtype(values):
# GH#49309 preserve orderedness
ordered = values.dtype.ordered
dtype = CategoricalDtype(categories, ordered)
else:
raise ValueError(f"Unknown dtype {repr(dtype)}")
elif categories is not None or ordered is not None:
raise ValueError(
"Cannot specify `categories` or `ordered` together with `dtype`."
)
elif not isinstance(dtype, CategoricalDtype):
raise ValueError(f"Cannot construct a CategoricalDtype from {dtype}")
elif cls.is_dtype(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(
values.dtype, categories, ordered
)
else:
# If dtype=None and values is not categorical, create a new dtype.
# Note: This could potentially have categories=None and
# ordered=None.
dtype = CategoricalDtype(categories, ordered)
return cast(CategoricalDtype, dtype)
def construct_from_string(cls, string: str_type) -> CategoricalDtype:
"""
Construct a CategoricalDtype from a string.
Parameters
----------
string : str
Must be the string "category" in order to be successfully constructed.
Returns
-------
CategoricalDtype
Instance of the dtype.
Raises
------
TypeError
If a CategoricalDtype cannot be constructed from the input.
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
if string != cls.name:
raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
# need ordered=None to ensure that operations specifying dtype="category" don't
# override the ordered value for existing categoricals
return cls(ordered=None)
def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
if ordered is not None:
self.validate_ordered(ordered)
if categories is not None:
categories = self.validate_categories(categories, fastpath=fastpath)
self._categories = categories
self._ordered = ordered
def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:
# for pickle compat. __get_state__ is defined in the
# PandasExtensionDtype superclass and uses the public properties to
# pickle -> need to set the settable private ones here (see GH26067)
self._categories = state.pop("categories", None)
self._ordered = state.pop("ordered", False)
def __hash__(self) -> int:
# _hash_categories returns a uint64, so use the negative
# space for when we have unknown categories to avoid a conflict
if self.categories is None:
if self.ordered:
return -1
else:
return -2
# We *do* want to include the real self.ordered here
return int(self._hash_categories)
def __eq__(self, other: Any) -> bool:
"""
Rules for CDT equality:
1) Any CDT is equal to the string 'category'
2) Any CDT is equal to itself
3) Any CDT is equal to a CDT with categories=None regardless of ordered
4) A CDT with ordered=True is only equal to another CDT with
ordered=True and identical categories in the same order
5) A CDT with ordered={False, None} is only equal to another CDT with
ordered={False, None} and identical categories, but same order is
not required. There is no distinction between False/None.
6) Any other comparison returns False
"""
if isinstance(other, str):
return other == self.name
elif other is self:
return True
elif not (hasattr(other, "ordered") and hasattr(other, "categories")):
return False
elif self.categories is None or other.categories is None:
# For non-fully-initialized dtypes, these are only equal to
# - the string "category" (handled above)
# - other CategoricalDtype with categories=None
return self.categories is other.categories
elif self.ordered or other.ordered:
# At least one has ordered=True; equal if both have ordered=True
# and the same values for categories in the same order.
return (self.ordered == other.ordered) and self.categories.equals(
other.categories
)
else:
# Neither has ordered=True; equal if both have the same categories,
# but same order is not necessary. There is no distinction between
# ordered=False and ordered=None: CDT(., False) and CDT(., None)
# will be equal if they have the same categories.
left = self.categories
right = other.categories
# GH#36280 the ordering of checks here is for performance
if not left.dtype == right.dtype:
return False
if len(left) != len(right):
return False
if self.categories.equals(other.categories):
# Check and see if they happen to be identical categories
return True
if left.dtype != object:
# Faster than calculating hash
indexer = left.get_indexer(right)
# Because left and right have the same length and are unique,
# `indexer` not having any -1s implies that there is a
# bijection between `left` and `right`.
return (indexer != -1).all()
# With object-dtype we need a comparison that identifies
# e.g. int(2) as distinct from float(2)
return hash(self) == hash(other)
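# Minimal sketch of the equality rules documented above (values illustrative, not part of
# the original source):
# >>> CategoricalDtype(['a', 'b']) == 'category'                                    # rule 1
# True
# >>> CategoricalDtype(['a', 'b']) == CategoricalDtype(['b', 'a'])                  # rule 5: unordered, same set
# True
# >>> CategoricalDtype(['a', 'b'], ordered=True) == CategoricalDtype(['b', 'a'], ordered=True)  # rule 4
# False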
def __repr__(self) -> str_type:
if self.categories is None:
data = "None"
else:
data = self.categories._format_data(name=type(self).__name__)
if data is None:
# self.categories is RangeIndex
data = str(self.categories._range)
data = data.rstrip(", ")
return f"CategoricalDtype(categories={data}, ordered={self.ordered})"
def _hash_categories(self) -> int:
from pandas.core.util.hashing import (
combine_hash_arrays,
hash_array,
hash_tuples,
)
categories = self.categories
ordered = self.ordered
if len(categories) and isinstance(categories[0], tuple):
# assumes that if any individual category is a tuple, then all are. ATM
# I don't really want to support just some of the categories being
# tuples.
cat_list = list(categories) # breaks if a np.array of categories
cat_array = hash_tuples(cat_list)
else:
if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
# TODO: hash_array doesn't handle mixed types. It casts
# everything to a str first, which means we treat
# {'1', '2'} the same as {'1', 2}
# find a better solution
hashed = hash((tuple(categories), ordered))
return hashed
if DatetimeTZDtype.is_dtype(categories.dtype):
# Avoid future warning.
categories = categories.view("datetime64[ns]")
cat_array = hash_array(np.asarray(categories), categorize=False)
if ordered:
cat_array = np.vstack(
[cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
)
else:
cat_array = np.array([cat_array])
combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))
return np.bitwise_xor.reduce(combined_hashed)
def construct_array_type(cls) -> type_t[Categorical]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas import Categorical
return Categorical
def validate_ordered(ordered: Ordered) -> None:
"""
Validates that we have a valid ordered parameter. If
it is not a boolean, a TypeError will be raised.
Parameters
----------
ordered : object
The parameter to be verified.
Raises
------
TypeError
If 'ordered' is not a boolean.
"""
if not is_bool(ordered):
raise TypeError("'ordered' must either be 'True' or 'False'")
def validate_categories(categories, fastpath: bool = False) -> Index:
"""
Validates that we have good categories
Parameters
----------
categories : array-like
fastpath : bool
Whether to skip nan and uniqueness checks
Returns
-------
categories : Index
"""
from pandas.core.indexes.base import Index
if not fastpath and not is_list_like(categories):
raise TypeError(
f"Parameter 'categories' must be list-like, was {repr(categories)}"
)
if not isinstance(categories, ABCIndex):
categories = Index._with_infer(categories, tupleize_cols=False)
if not fastpath:
if categories.hasnans:
raise ValueError("Categorical categories cannot be null")
if not categories.is_unique:
raise ValueError("Categorical categories must be unique")
if isinstance(categories, ABCCategoricalIndex):
categories = categories.categories
return categories
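# Minimal sketch of the validators above (values illustrative; both are exposed as
# staticmethods in the original source, not shown here because decorators are stripped):
# >>> CategoricalDtype.validate_categories(['a', 'b', 'a'])
# ValueError: Categorical categories must be unique
# >>> CategoricalDtype.validate_ordered('yes')
# TypeError: 'ordered' must either be 'True' or 'False'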
def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:
"""
Returns a CategoricalDtype with categories and ordered taken from dtype
if specified, otherwise falling back to self if unspecified
Parameters
----------
dtype : CategoricalDtype
Returns
-------
new_dtype : CategoricalDtype
"""
if isinstance(dtype, str) and dtype == "category":
# dtype='category' should not change anything
return self
elif not self.is_dtype(dtype):
raise ValueError(
f"a CategoricalDtype must be passed to perform an update, "
f"got {repr(dtype)}"
)
else:
# from here on, dtype is a CategoricalDtype
dtype = cast(CategoricalDtype, dtype)
# update categories/ordered unless they've been explicitly passed as None
new_categories = (
dtype.categories if dtype.categories is not None else self.categories
)
new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered
return CategoricalDtype(new_categories, new_ordered)
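# Minimal sketch of update_dtype (values illustrative, not part of the original source):
# fields that the incoming dtype leaves as None fall back to self:
# >>> CategoricalDtype(['a', 'b'], ordered=True).update_dtype('category')
# CategoricalDtype(categories=['a', 'b'], ordered=True)
# >>> CategoricalDtype(['a', 'b'], ordered=True).update_dtype(CategoricalDtype(['x', 'y']))
# CategoricalDtype(categories=['x', 'y'], ordered=False)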
def categories(self) -> Index:
"""
An ``Index`` containing the unique categories allowed.
"""
return self._categories
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self._ordered
def _is_boolean(self) -> bool:
from pandas.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.categories)
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
from pandas.core.arrays.sparse import SparseDtype
# check if we have all categorical dtype with identical categories
if all(isinstance(x, CategoricalDtype) for x in dtypes):
first = dtypes[0]
if all(first == other for other in dtypes[1:]):
return first
# special case non-initialized categorical
# TODO we should figure out the expected return value in general
non_init_cats = [
isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
]
if all(non_init_cats):
return self
elif any(non_init_cats):
return None
# categorical is aware of Sparse -> extract sparse subdtypes
dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
# extract the categories' dtype
non_cat_dtypes = [
x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
]
# TODO should categorical always give an answer?
from pandas.core.dtypes.cast import find_common_type
return find_common_type(non_cat_dtypes)
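# Minimal sketch of the common-dtype resolution above (values illustrative, not part of the
# original source): categoricals with differing categories fall back to the dtype of their
# categories:
# >>> a = pd.Series(pd.Categorical(['a', 'b']))
# >>> b = pd.Series(pd.Categorical(['b', 'c']))
# >>> pd.concat([a, b]).dtype
# dtype('O')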
The provided code snippet includes necessary dependencies for implementing the `_unpack_nested_dtype` function. Write a Python function `def _unpack_nested_dtype(other: Index) -> Index` to solve the following problem:
When checking if our dtype is comparable with another, we need to unpack CategoricalDtype to look at its categories.dtype. Parameters ---------- other : Index Returns ------- Index
Here is the function:
def _unpack_nested_dtype(other: Index) -> Index:
"""
When checking if our dtype is comparable with another, we need
to unpack CategoricalDtype to look at its categories.dtype.
Parameters
----------
other : Index
Returns
-------
Index
"""
dtype = other.dtype
if isinstance(dtype, CategoricalDtype):
# If there is ever a SparseIndex, this could get dispatched
# here too.
return dtype.categories
return other | When checking if our dtype is comparable with another, we need to unpack CategoricalDtype to look at its categories.dtype. Parameters ---------- other : Index Returns ------- Index |
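# Minimal sketch of _unpack_nested_dtype (private helper; values illustrative, not part of
# the original source):
# >>> _unpack_nested_dtype(pd.CategoricalIndex(['a', 'b', 'a']))
# Index(['a', 'b'], dtype='object')
# >>> _unpack_nested_dtype(pd.Index([1, 2]))     # non-categorical indexes pass through unchanged
# Index([1, 2], dtype='int64')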
173,181 | from __future__ import annotations
from datetime import datetime
import functools
from itertools import zip_longest
import operator
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Hashable,
Iterable,
Literal,
NoReturn,
Sequence,
TypeVar,
cast,
final,
overload,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
NaT,
algos as libalgos,
index as libindex,
lib,
)
from pandas._libs.internals import BlockValuesRefs
import pandas._libs.join as libjoin
from pandas._libs.lib import (
is_datetime_array,
no_default,
)
from pandas._libs.missing import is_float_nan
from pandas._libs.tslibs import (
IncompatibleFrequency,
OutOfBoundsDatetime,
Timestamp,
tz_compare,
)
from pandas._typing import (
AnyAll,
ArrayLike,
Axes,
Axis,
DropKeep,
DtypeObj,
F,
IgnoreRaise,
IndexLabel,
JoinHow,
Level,
Shape,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
DuplicateLabelError,
InvalidIndexError,
)
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.util._exceptions import (
find_stack_level,
rewrite_exception,
)
from pandas.core.dtypes.astype import (
astype_array,
astype_is_view,
)
from pandas.core.dtypes.cast import (
LossySetitemError,
can_hold_element,
common_dtype_categorical_compat,
find_result_type,
infer_dtype_from,
maybe_cast_pointwise_result,
np_can_hold_element,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_any_real_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_dtype_equal,
is_ea_or_datetimelike_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_iterator,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_signed_integer_dtype,
is_string_dtype,
needs_i8_conversion,
pandas_dtype,
validate_all_hashable,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCMultiIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.inference import is_dict_like
from pandas.core.dtypes.missing import (
array_equivalent,
is_valid_na_for_dtype,
isna,
)
from pandas.core import (
arraylike,
ops,
)
from pandas.core.accessor import CachedAccessor
import pandas.core.algorithms as algos
from pandas.core.array_algos.putmask import (
setitem_datetimelike_compat,
validate_putmask,
)
from pandas.core.arrays import (
ArrowExtensionArray,
BaseMaskedArray,
Categorical,
ExtensionArray,
)
from pandas.core.arrays.string_ import StringArray
from pandas.core.base import (
IndexOpsMixin,
PandasObject,
)
import pandas.core.common as com
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
sanitize_array,
)
from pandas.core.indexers import disallow_ndim_indexing
from pandas.core.indexes.frozen import FrozenList
from pandas.core.missing import clean_reindex_fill_method
from pandas.core.ops import get_op_result_name
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
ensure_key_mapped,
get_group_index_sorter,
nargsort,
)
from pandas.core.strings.accessor import StringMethods
from pandas.io.formats.printing import (
PrettyDict,
default_pprint,
format_object_summary,
pprint_thing,
)
def find_stack_level() -> int:
"""
Find the first place in the stack that is not inside pandas
(tests notwithstanding).
"""
import inspect
import os
import pandas as pd
pkg_dir = os.path.dirname(pd.__file__)
test_dir = os.path.join(pkg_dir, "tests")
# https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
frame = inspect.currentframe()
n = 0
while frame:
fname = inspect.getfile(frame)
if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
frame = frame.f_back
n += 1
else:
break
return n
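# Minimal usage sketch (not part of the original source): callers pass the result as
# `stacklevel` so the warning points at the first frame outside the pandas package rather
# than at pandas internals:
# warnings.warn("deprecated usage", FutureWarning, stacklevel=find_stack_level())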
def _maybe_try_sort(result, sort):
if sort is not False:
try:
result = algos.safe_sort(result)
except TypeError as err:
if sort is True:
raise
warnings.warn(
f"{err}, sort order is undefined for incomparable objects.",
RuntimeWarning,
stacklevel=find_stack_level(),
)
return result | null |
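# Minimal sketch of _maybe_try_sort (values illustrative, not part of the original source):
# sort=None sorts when possible and downgrades a TypeError from incomparable objects to a
# RuntimeWarning, while sort=True re-raises and sort=False skips sorting entirely:
# >>> _maybe_try_sort(np.array([3, 1, 2]), sort=None)
# array([1, 2, 3])
# >>> _maybe_try_sort(np.array([3, 1, 2]), sort=False)
# array([3, 1, 2])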
173,182 | from __future__ import annotations
import datetime as dt
import operator
from typing import (
TYPE_CHECKING,
Hashable,
)
import warnings
import numpy as np
import pytz
from pandas._libs import (
NaT,
Period,
Timestamp,
index as libindex,
lib,
)
from pandas._libs.tslibs import (
Resolution,
periods_per_day,
timezones,
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import (
Dtype,
DtypeObj,
Frequency,
IntervalClosedType,
TimeAmbiguous,
TimeNonexistent,
npt,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.datetimes import (
DatetimeArray,
tz_to_dtype,
)
import pandas.core.common as com
from pandas.core.indexes.base import (
Index,
maybe_extract_name,
)
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
class DatetimeIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray-like of datetime64 data.
Represented internally as int64, which can be boxed to Timestamp objects
that are subclasses of datetime and carry metadata.
.. versionchanged:: 2.0.0
The various numeric date/time attributes (:attr:`~DatetimeIndex.day`,
:attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype
``int32``. Previously they had dtype ``int64``.
Parameters
----------
data : array-like (1-dimensional)
Datetime-like data to construct index with.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
Set the Timezone of the data.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
closed : {'left', 'right'}, optional
Set whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from 03:00
DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
dayfirst : bool, default False
If True, parse dates in `data` with the day first order.
yearfirst : bool, default False
If True parse dates in `data` with the year first order.
dtype : numpy.dtype or DatetimeTZDtype or str, default None
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
copy : bool, default False
Make a copy of input ndarray.
name : label, default None
Name to be stored in the index.
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
timetz
dayofyear
day_of_year
weekofyear
week
dayofweek
day_of_week
weekday
quarter
tz
freq
freqstr
is_month_start
is_month_end
is_quarter_start
is_quarter_end
is_year_start
is_year_end
is_leap_year
inferred_freq
Methods
-------
normalize
strftime
snap
tz_convert
tz_localize
round
floor
ceil
to_period
to_pydatetime
to_series
to_frame
month_name
day_name
mean
std
See Also
--------
Index : The base pandas Index type.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
to_datetime : Convert argument to datetime.
date_range : Create a fixed-frequency DatetimeIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "datetimeindex"
_data_cls = DatetimeArray
_supports_partial_string_indexing = True
def _engine_type(self) -> type[libindex.DatetimeEngine]:
return libindex.DatetimeEngine
_data: DatetimeArray
tz: dt.tzinfo | None
# --------------------------------------------------------------------
# methods that dispatch to DatetimeArray and wrap result
def strftime(self, date_format) -> Index:
arr = self._data.strftime(date_format)
return Index(arr, name=self.name, dtype=object)
def tz_convert(self, tz) -> DatetimeIndex:
arr = self._data.tz_convert(tz)
return type(self)._simple_new(arr, name=self.name, refs=self._references)
def tz_localize(
self,
tz,
ambiguous: TimeAmbiguous = "raise",
nonexistent: TimeNonexistent = "raise",
) -> DatetimeIndex:
arr = self._data.tz_localize(tz, ambiguous, nonexistent)
return type(self)._simple_new(arr, name=self.name)
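# Minimal usage sketch of the tz wrappers above (values illustrative, not part of the
# original source):
# >>> idx = pd.DatetimeIndex(['2023-01-01 12:00'])
# >>> idx.tz_localize('UTC')
# DatetimeIndex(['2023-01-01 12:00:00+00:00'], dtype='datetime64[ns, UTC]', freq=None)
# >>> idx.tz_localize('UTC').tz_convert('US/Eastern')
# DatetimeIndex(['2023-01-01 07:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq=None)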
def to_period(self, freq=None) -> PeriodIndex:
from pandas.core.indexes.api import PeriodIndex
arr = self._data.to_period(freq)
return PeriodIndex._simple_new(arr, name=self.name)
def to_julian_date(self) -> Index:
arr = self._data.to_julian_date()
return Index._simple_new(arr, name=self.name)
def isocalendar(self) -> DataFrame:
df = self._data.isocalendar()
return df.set_index(self)
def _resolution_obj(self) -> Resolution:
return self._data._resolution_obj
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
freq: Frequency | lib.NoDefault = lib.no_default,
tz=lib.no_default,
normalize: bool = False,
closed=None,
ambiguous: TimeAmbiguous = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> DatetimeIndex:
if is_scalar(data):
cls._raise_scalar_data_error(data)
# - Cases checked above all return/raise before reaching here - #
name = maybe_extract_name(name, data, cls)
if (
isinstance(data, DatetimeArray)
and freq is lib.no_default
and tz is lib.no_default
and dtype is None
):
# fastpath, similar logic in TimedeltaIndex.__new__;
# Note in this particular case we retain non-nano.
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
dtarr = DatetimeArray._from_sequence_not_strict(
data,
dtype=dtype,
copy=copy,
tz=tz,
freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
refs = None
if not copy and isinstance(data, (Index, ABCSeries)):
refs = data._references
subarr = cls._simple_new(dtarr, name=name, refs=refs)
return subarr
# --------------------------------------------------------------------
def _is_dates_only(self) -> bool:
"""
Return a boolean if we are only dates (and don't have a timezone)
Returns
-------
bool
"""
from pandas.io.formats.format import is_dates_only
# error: Argument 1 to "is_dates_only" has incompatible type
# "Union[ExtensionArray, ndarray]"; expected "Union[ndarray,
# DatetimeArray, Index, DatetimeIndex]"
return self.tz is None and is_dates_only(self._values) # type: ignore[arg-type]
def __reduce__(self):
d = {"data": self._data, "name": self.name}
return _new_DatetimeIndex, (type(self), d), None
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if self.tz is not None:
# If we have tz, we can compare to tzaware
return is_datetime64tz_dtype(dtype)
# if we don't have tz, we can only compare to tz-naive
return is_datetime64_dtype(dtype)
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self):
from pandas.io.formats.format import get_format_datetime64
formatter = get_format_datetime64(is_dates_only_=self._is_dates_only)
return lambda x: f"'{formatter(x)}'"
# --------------------------------------------------------------------
# Set Operation Methods
def _can_range_setop(self, other) -> bool:
# GH 46702: If self or other have non-UTC tzs, DST transitions prevent
# range representation due to no singular step
if (
self.tz is not None
and not timezones.is_utc(self.tz)
and not timezones.is_fixed_offset(self.tz)
):
return False
if (
other.tz is not None
and not timezones.is_utc(other.tz)
and not timezones.is_fixed_offset(other.tz)
):
return False
return super()._can_range_setop(other)
# --------------------------------------------------------------------
def _get_time_micros(self) -> npt.NDArray[np.int64]:
"""
Return the number of microseconds since midnight.
Returns
-------
ndarray[int64_t]
"""
values = self._data._local_timestamps()
ppd = periods_per_day(self._data._creso)
frac = values % ppd
if self.unit == "ns":
micros = frac // 1000
elif self.unit == "us":
micros = frac
elif self.unit == "ms":
micros = frac * 1000
elif self.unit == "s":
micros = frac * 1_000_000
else: # pragma: no cover
raise NotImplementedError(self.unit)
micros[self._isnan] = -1
return micros
def snap(self, freq: Frequency = "S") -> DatetimeIndex:
"""
Snap time stamps to nearest occurring frequency.
Returns
-------
DatetimeIndex
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
dta = self._data.copy()
for i, v in enumerate(self):
s = v
if not freq.is_on_offset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
dta[i] = s
return DatetimeIndex._simple_new(dta, name=self.name)
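# Minimal usage sketch of snap (values illustrative, not part of the original source):
# each timestamp is moved to the nearest occurrence of the given frequency:
# >>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'])
# >>> idx.snap('MS')
# DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],
#               dtype='datetime64[ns]', freq=None)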
# --------------------------------------------------------------------
# Indexing Methods
def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
per = Period(parsed, freq=reso.attr_abbrev)
start, end = per.start_time, per.end_time
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
# timezone
start = start.tz_localize(parsed.tzinfo)
end = end.tz_localize(parsed.tzinfo)
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
"The index must be timezone aware when indexing "
"with a date string with a UTC offset"
)
# The flipped case with parsed.tz is None and self.tz is not None
# is ruled out bc parsed and reso are produced by _parse_with_reso,
# which localizes parsed.
return start, end
def _parse_with_reso(self, label: str):
parsed, reso = super()._parse_with_reso(label)
parsed = Timestamp(parsed)
if self.tz is not None and parsed.tzinfo is None:
# we special-case timezone-naive strings and timezone-aware
# DatetimeIndex
# https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081
parsed = parsed.tz_localize(self.tz)
return parsed, reso
def _disallow_mismatched_indexing(self, key) -> None:
"""
Check for mismatched-tzawareness indexing and re-raise as KeyError.
"""
# we get here with isinstance(key, self._data._recognized_scalars)
try:
# GH#36148
self._data._assert_tzawareness_compat(key)
except TypeError as err:
raise KeyError(key) from err
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
self._check_indexing_error(key)
orig_key = key
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
if isinstance(key, self._data._recognized_scalars):
# needed to localize naive datetimes
self._disallow_mismatched_indexing(key)
key = Timestamp(key)
elif isinstance(key, str):
try:
parsed, reso = self._parse_with_reso(key)
except (ValueError, pytz.NonExistentTimeError) as err:
raise KeyError(key) from err
self._disallow_mismatched_indexing(parsed)
if self._can_partial_date_slice(reso):
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
key = parsed
elif isinstance(key, dt.timedelta):
# GH#20464
raise TypeError(
f"Cannot index {type(self).__name__} with {type(key).__name__}"
)
elif isinstance(key, dt.time):
return self.indexer_at_time(key)
else:
# unrecognized type
raise KeyError(key)
try:
return Index.get_loc(self, key)
except KeyError as err:
raise KeyError(orig_key) from err
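# Minimal usage sketch of get_loc (values illustrative, not part of the original source):
# an exact label returns an integer position, while a partial date string on a monotonic
# index returns a slice:
# >>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02', '2023-01-03'])
# >>> idx.get_loc('2023-01-02')
# 1
# >>> idx.get_loc('2023-01')
# slice(0, 3, None)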
def _maybe_cast_slice_bound(self, label, side: str):
# GH#42855 handle date here instead of get_slice_bound
if isinstance(label, dt.date) and not isinstance(label, dt.datetime):
# Pandas supports slicing with dates, treated as datetimes at midnight.
# https://github.com/pandas-dev/pandas/issues/31501
label = Timestamp(label).to_pydatetime()
label = super()._maybe_cast_slice_bound(label, side)
self._data._assert_tzawareness_compat(label)
return Timestamp(label)
def slice_indexer(self, start=None, end=None, step=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
# an array of (self.hour, self.minute, self.seconds, self.microsecond).
if isinstance(start, dt.time) and isinstance(end, dt.time):
if step is not None and step != 1:
raise ValueError("Must have step size of 1 with time slices")
return self.indexer_between_time(start, end)
if isinstance(start, dt.time) or isinstance(end, dt.time):
raise KeyError("Cannot mix time and non-time slice keys")
def check_str_or_none(point) -> bool:
return point is not None and not isinstance(point, str)
# GH#33146 if start and end are combinations of str and None and Index is not
# monotonic, we can not use Index.slice_indexer because it does not honor the
# actual elements, is only searching for start and end
if (
check_str_or_none(start)
or check_str_or_none(end)
or self.is_monotonic_increasing
):
return Index.slice_indexer(self, start, end, step)
mask = np.array(True)
raise_mask = np.array(True)
if start is not None:
start_casted = self._maybe_cast_slice_bound(start, "left")
mask = start_casted <= self
raise_mask = start_casted == self
if end is not None:
end_casted = self._maybe_cast_slice_bound(end, "right")
mask = (self <= end_casted) & mask
raise_mask = (end_casted == self) | raise_mask
if not raise_mask.any():
raise KeyError(
"Value based partial slicing on non-monotonic DatetimeIndexes "
"with non-existing keys is not allowed.",
)
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
# --------------------------------------------------------------------
def inferred_type(self) -> str:
# b/c datetimes are represented internally as integer units since the epoch, make
# sure we can't have ambiguous indexing
return "datetime64"
def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:
"""
Return index locations of values at particular time of day.
Parameters
----------
time : datetime.time or str
Time passed in either as object (datetime.time) or as string in
appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
Returns
-------
np.ndarray[np.intp]
See Also
--------
indexer_between_time : Get index locations of values between particular
times of day.
DataFrame.at_time : Select values at particular time of day.
"""
if asof:
raise NotImplementedError("'asof' argument is not supported")
if isinstance(time, str):
from dateutil.parser import parse
time = parse(time).time()
if time.tzinfo:
if self.tz is None:
raise ValueError("Index must be timezone aware.")
time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
else:
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (time_micros == micros).nonzero()[0]
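# Minimal usage sketch (values illustrative, not part of the original source):
# >>> idx = pd.DatetimeIndex(['1/1/2020 10:00', '2/1/2020 11:00', '3/1/2020 10:00'])
# >>> idx.indexer_at_time('10:00')
# array([0, 2])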
def indexer_between_time(
self, start_time, end_time, include_start: bool = True, include_end: bool = True
) -> npt.NDArray[np.intp]:
"""
Return index locations of values between particular times of day.
Parameters
----------
start_time, end_time : datetime.time, str
Time passed either as object (datetime.time) or as string in
appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
include_start : bool, default True
include_end : bool, default True
Returns
-------
np.ndarray[np.intp]
See Also
--------
indexer_at_time : Get index locations of values at particular time of day.
DataFrame.between_time : Select values between particular times of day.
"""
start_time = to_time(start_time)
end_time = to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
return mask.nonzero()[0]
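# Minimal usage sketch (values illustrative, not part of the original source; both bounds
# are inclusive by default):
# >>> idx = pd.DatetimeIndex(['2023-01-01 00:00', '2023-01-01 02:00', '2023-01-01 03:00'])
# >>> idx.indexer_between_time('00:00', '02:00')
# array([0, 1])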
def tz_to_dtype(tz: tzinfo | None, unit: str = "ns"):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
unit : str, default "ns"
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return np.dtype(f"M8[{unit}]")
else:
return DatetimeTZDtype(tz=tz, unit=unit)
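# Minimal usage sketch of tz_to_dtype (values illustrative, not part of the original source):
# >>> tz_to_dtype(None)
# dtype('<M8[ns]')
# >>> tz_to_dtype(pytz.UTC, unit='s')
# datetime64[s, UTC]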
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_internal_fill_value = np.datetime64("NaT", "ns")
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
def _scalar_type(self) -> type[Timestamp]:
return Timestamp
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = (
_field_ops + _object_ops + _bool_ops + _other_ops + ["unit"]
)
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
"as_unit",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DatetimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq: BaseOffset | None = None
_default_dtype = DT64NS_DTYPE # used in TimeLikeOps.__init__
def _validate_dtype(cls, values, dtype):
# used in TimeLikeOps.__init__
_validate_dt64_dtype(values.dtype)
dtype = _validate_dt64_dtype(dtype)
return dtype
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
def _simple_new( # type: ignore[override]
cls,
values: np.ndarray,
freq: BaseOffset | None = None,
dtype=DT64NS_DTYPE,
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert dtype.kind == "M"
if isinstance(dtype, np.dtype):
assert dtype == values.dtype
assert not is_unitless(dtype)
else:
# DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC],
# then values.dtype should be M8[us].
assert dtype._creso == get_unit_from_dtype(values.dtype)
result = super()._simple_new(values, dtype)
result._freq = freq
return result
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
def _from_sequence_not_strict(
cls,
data,
*,
dtype=None,
copy: bool = False,
tz=lib.no_default,
freq: str | BaseOffset | lib.NoDefault | None = lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous: TimeAmbiguous = "raise",
):
"""
A non-strict version of _from_sequence, called from DatetimeIndex.__new__.
"""
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
# if the user either explicitly passes tz=None or a tz-naive dtype, we
# disallows inferring a tz.
explicit_tz_none = tz is None
if tz is lib.no_default:
tz = None
else:
tz = timezones.maybe_get_tz(tz)
dtype = _validate_dt64_dtype(dtype)
# if dtype has an embedded tz, capture it
tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none)
unit = None
if dtype is not None:
if isinstance(dtype, np.dtype):
unit = np.datetime_data(dtype)[0]
else:
# DatetimeTZDtype
unit = dtype.unit
subarr, tz, inferred_freq = _sequence_to_dt64ns(
data,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
out_unit=unit,
)
# We have to call this again after possibly inferring a tz above
_validate_tz_from_dtype(dtype, tz, explicit_tz_none)
if tz is not None and explicit_tz_none:
raise ValueError(
"Passed data is timezone-aware, incompatible with 'tz=None'. "
"Use obj.tz_localize(None) instead."
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
data_unit = np.datetime_data(subarr.dtype)[0]
data_dtype = tz_to_dtype(tz, data_unit)
result = cls._simple_new(subarr, freq=freq, dtype=data_dtype)
if unit is not None and unit != result.unit:
# If unit was specified in user-passed dtype, cast to it here
result = result.as_unit(unit)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
# error: Signature of "_generate_range" incompatible with supertype
# "DatetimeLikeArrayMixin"
def _generate_range( # type: ignore[override]
cls,
start,
end,
periods,
freq,
tz=None,
normalize: bool = False,
ambiguous: TimeAmbiguous = "raise",
nonexistent: TimeNonexistent = "raise",
inclusive: IntervalClosedType = "both",
*,
unit: str | None = None,
) -> DatetimeArray:
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
if unit is not None:
if unit not in ["s", "ms", "us", "ns"]:
raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'")
else:
unit = "ns"
if start is not None and unit is not None:
start = start.as_unit(unit, round_ok=False)
if end is not None and unit is not None:
end = end.as_unit(unit, round_ok=False)
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
# tz info from start and end to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
i8values = generate_regular_range(start, end, periods, freq, unit=unit)
else:
xdr = _generate_range(
start=start, end=end, periods=periods, offset=freq, unit=unit
)
i8values = np.array([x._value for x in xdr], dtype=np.int64)
endpoint_tz = start.tz if start is not None else end.tz
if tz is not None and endpoint_tz is None:
if not timezones.is_utc(tz):
# short-circuit tz_localize_to_utc which would make
# an unnecessary copy with UTC but be a no-op.
creso = abbrev_to_npy_unit(unit)
i8values = tzconversion.tz_localize_to_utc(
i8values,
tz,
ambiguous=ambiguous,
nonexistent=nonexistent,
creso=creso,
)
# i8values is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent)
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent)
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
i8values = (
np.linspace(0, end._value - start._value, periods, dtype="int64")
+ start._value
)
if i8values.dtype != "i8":
# 2022-01-09 I (brock) am not sure if it is possible for this
# to overflow and cast to e.g. f8, but if it does we need to cast
i8values = i8values.astype("i8")
if start == end:
if not left_inclusive and not right_inclusive:
i8values = i8values[1:-1]
else:
start_i8 = Timestamp(start)._value
end_i8 = Timestamp(end)._value
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(i8values) and i8values[0] == start_i8:
i8values = i8values[1:]
if not right_inclusive and len(i8values) and i8values[-1] == end_i8:
i8values = i8values[:-1]
dt64_values = i8values.view(f"datetime64[{unit}]")
dtype = tz_to_dtype(tz, unit=unit)
return cls._simple_new(dt64_values, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value)
if value is NaT:
return np.datetime64(value._value, self.unit)
else:
return value.as_unit(self.unit).asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other) -> None:
if other is NaT:
return
self._assert_tzawareness_compat(other)
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
# GH#42228
value = x.view("i8")
ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz)
return ts
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
def tz(self) -> tzinfo | None:
"""
Return the timezone.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
def is_normalized(self) -> bool:
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz, reso=self._creso)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self) -> Iterator:
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = (length // chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i],
tz=self.tz,
box="timestamp",
reso=self._creso,
)
yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif isinstance(dtype, ExtensionDtype):
if not isinstance(dtype, DatetimeTZDtype):
# e.g. Sparse[datetime64[ns]]
return super().astype(dtype, copy=copy)
elif self.tz is None:
# pre-2.0 this did self.tz_localize(dtype.tz), which did not match
# the Series behavior which did
# values.tz_localize("UTC").tz_convert(dtype.tz)
raise TypeError(
"Cannot use .astype to convert from timezone-naive dtype to "
"timezone-aware dtype. Use obj.tz_localize instead or "
"series.dt.tz_localize instead"
)
else:
# tzaware unit conversion e.g. datetime64[s, UTC]
np_dtype = np.dtype(dtype.str)
res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy)
return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq)
elif (
self.tz is None
and is_datetime64_dtype(dtype)
and not is_unitless(dtype)
and is_supported_unit(get_unit_from_dtype(dtype))
):
# unit conversion e.g. datetime64[s]
res_values = astype_overflowsafe(self._ndarray, dtype, copy=True)
return type(self)._simple_new(res_values, dtype=res_values.dtype)
# TODO: preserve freq?
elif self.tz is not None and is_datetime64_dtype(dtype):
# pre-2.0 behavior for DTA/DTI was
# values.tz_convert("UTC").tz_localize(None), which did not match
# the Series behavior
raise TypeError(
"Cannot use .astype to convert from timezone-aware dtype to "
"timezone-naive dtype. Use obj.tz_localize(None) or "
"obj.tz_convert('UTC').tz_localize(None) instead."
)
elif (
self.tz is None
and is_datetime64_dtype(dtype)
and dtype != self.dtype
and is_unitless(dtype)
):
raise TypeError(
"Casting to unit-less dtype 'datetime64' is not supported. "
"Pass e.g. 'datetime64[ns]' instead."
)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
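A minimal sketch (not from the source) of the casting rules implemented above, assuming standard pandas 2.x behavior:
import pandas as pd

dti = pd.date_range("2021-01-01", periods=3)      # tz-naive, datetime64[ns]
print(dti.astype("datetime64[s]").dtype)          # datetime64[s] -- supported unit conversion
try:
    dti.astype("datetime64[ns, UTC]")             # naive -> aware via astype is rejected
except TypeError:
    print("use tz_localize instead")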
# -----------------------------------------------------------------
# Rendering Methods
def _format_native_types(
self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=fmt, na_rep=na_rep, reso=self._creso
)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
# vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other) -> None:
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
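For illustration (not part of the source), the compatibility check above is what surfaces as a TypeError when naive and aware values are mixed in a comparison:
import pandas as pd

naive = pd.date_range("2021-01-01", periods=2)
aware = pd.Timestamp("2021-01-01", tz="UTC")
try:
    naive > aware
except TypeError as err:
    print(err)  # Cannot compare tz-naive and tz-aware datetime-like objects.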
# -----------------------------------------------------------------
# Arithmetic Methods
def _add_offset(self, offset) -> DatetimeArray:
assert not isinstance(offset, Tick)
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
try:
result = offset._apply_array(values).view(values.dtype)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
PerformanceWarning,
stacklevel=find_stack_level(),
)
result = self.astype("O") + offset
result = type(self)._from_sequence(result).as_unit(self.unit)
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return result.tz_localize(self.tz)
else:
result = DatetimeArray._simple_new(result, dtype=result.dtype)
if self.tz is not None:
result = result.tz_localize(self.tz)
return result
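Illustrative usage (not from the source): adding a non-Tick offset such as MonthEnd goes through the vectorized path above, with the timezone stripped and re-applied around the offset arithmetic:
import pandas as pd

dti = pd.date_range("2021-01-31", periods=3, freq="M", tz="UTC")
print(dti + pd.offsets.MonthEnd(1))
# roughly: ['2021-02-28', '2021-03-31', '2021-04-30'], tz='UTC' preserved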
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> npt.NDArray[np.int64]:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
# Avoid the copy that would be made in tzconversion
return self.asi8
return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
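For illustration (not part of the source): because time-of-day helpers work on these wall-clock i8 values, tz-aware data reports local times rather than UTC:
import pandas as pd

dti = pd.date_range("2021-01-01 23:00", periods=2, freq="H", tz="US/Eastern")
print(dti.time)  # should show [datetime.time(23, 0) datetime.time(0, 0)], i.e. local wall times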
def tz_convert(self, tz) -> DatetimeArray:
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz, unit=self.unit)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
def tz_localize(
self,
tz,
ambiguous: TimeAmbiguous = "raise",
nonexistent: TimeNonexistent = "raise",
) -> DatetimeArray:
"""
Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
This method can also be used to do the inverse -- to create a time
zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backward with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8,
tz,
ambiguous=ambiguous,
nonexistent=nonexistent,
creso=self._creso,
)
new_dates = new_dates.view(f"M8[{self.unit}]")
dtype = tz_to_dtype(tz, unit=self.unit)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
Return an ndarray of datetime.datetime objects.
Returns
-------
numpy.ndarray
"""
return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)
def normalize(self) -> DatetimeArray:
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases, when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso)
dt64_values = new_values.view(self._ndarray.dtype)
dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype)
dta = dta._with_freq("infer")
if self.tz is not None:
dta = dta.tz_localize(self.tz)
return dta
def to_period(self, freq=None) -> PeriodArray:
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
stacklevel=find_stack_level(),
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None) -> npt.NDArray[np.object_]:
"""
Return the month names with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale (``'en_US.utf8'``). Use the command
``locale -a`` on your terminal on Unix systems to find your locale
language code.
Returns
-------
Series or Index
Series or Index of month names.
Examples
--------
>>> s = pd.Series(pd.date_range(start='2018-01', freq='M', periods=3))
>>> s
0 2018-01-31
1 2018-02-28
2 2018-03-31
dtype: datetime64[ns]
>>> s.dt.month_name()
0 January
1 February
2 March
dtype: object
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
Using the ``locale`` parameter you can set a different locale language,
for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month
names in Brazilian Portuguese language.
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP
Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(
values, "month_name", locale=locale, reso=self._creso
)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None) -> npt.NDArray[np.object_]:
"""
Return the day names with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale (``'en_US.utf8'``). Use the command
``locale -a`` on your terminal on Unix systems to find your locale
language code.
Returns
-------
Series or Index
Series or Index of day names.
Examples
--------
>>> s = pd.Series(pd.date_range(start='2018-01-01', freq='D', periods=3))
>>> s
0 2018-01-01
1 2018-01-02
2 2018-01-03
dtype: datetime64[ns]
>>> s.dt.day_name()
0 Monday
1 Tuesday
2 Wednesday
dtype: object
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
Using the ``locale`` parameter you can set a different locale language,
for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day
names in Brazilian Portuguese language.
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP
Index(['Segunda', 'Terça', 'Quarta'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(
values, "day_name", locale=locale, reso=self._creso
)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of :class:`datetime.time` objects.
The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="time", reso=self._creso)
@property
def timetz(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of :class:`datetime.time` objects with timezones.
The time part of the Timestamps.
"""
return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso)
@property
def date(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of python :class:`datetime.date` objects.
Namely, the date part of Timestamps without time and
timezone information.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="date", reso=self._creso)
def isocalendar(self) -> DataFrame:
"""
Calculate year, week, and day according to the ISO 8601 standard.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
With columns year, week and day.
See Also
--------
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
week number, and weekday for the given Timestamp object.
datetime.date.isocalendar : Return a named tuple object with
three components: year, week and weekday.
Examples
--------
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
>>> idx.isocalendar()
year week day
2019-12-29 2019 52 7
2019-12-30 2020 1 1
2019-12-31 2020 1 2
2020-01-01 2020 1 3
>>> idx.isocalendar().week
2019-12-29 52
2019-12-30 1
2019-12-31 1
2020-01-01 1
Freq: D, Name: week, dtype: UInt32
"""
from pandas import DataFrame
values = self._local_timestamps()
sarray = fields.build_isocalendar_sarray(values, reso=self._creso)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
if self._hasna:
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="Y")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[ns]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int32
""",
)
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="M")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[ns]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int32
""",
)
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[ns]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int32
""",
)
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int32
""",
)
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="T")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[ns]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int32
""",
)
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int32
""",
)
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[ns]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int32
""",
)
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int32
""",
)
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0, and ends on Sunday, which is denoted
by 6. This method is available both on Series with datetime
values (using the `dt` accessor) and on DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int32
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
dayofweek = day_of_week
weekday = day_of_week
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
""",
)
dayofyear = day_of_year
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
""",
)
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""",
)
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year which has 366 days (instead of 365), including
the 29th of February as an intercalary day.
Leap years are years which are multiples of four, with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
def to_julian_date(self) -> npt.NDArray[np.float64]:
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
https://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (
day
+ np.fix((153 * month - 457) / 5)
+ 365 * year
+ np.floor(year / 4)
- np.floor(year / 100)
+ np.floor(year / 400)
+ 1_721_118.5
+ (
self.hour
+ self.minute / 60
+ self.second / 3600
+ self.microsecond / 3600 / 10**6
+ self.nanosecond / 3600 / 10**9
)
/ 24
)
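As a quick sanity check (illustrative, not from the source), noon on 2000-01-01 corresponds to the well-known J2000.0 epoch, Julian Date 2451545.0:
import pandas as pd

dti = pd.DatetimeIndex(["2000-01-01 12:00:00"])
print(dti.to_julian_date())  # Index([2451545.0], dtype='float64')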
# -----------------------------------------------------------------
# Reductions
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
"""
Return sample standard deviation over requested axis.
Normalized by N-1 by default. This can be changed using the ddof argument.
Parameters
----------
axis : int, optional, default None
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to `None`.
ddof : int, default 1
Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result will be
NA.
Returns
-------
Timedelta
"""
# Because std is translation-invariant, we can get self.std
# by calculating (self - Timestamp(0)).std, and we can do it
# without creating a copy by using a view on self._ndarray
from pandas.core.arrays import TimedeltaArray
# Find the td64 dtype with the same resolution as our dt64 dtype
dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64")
dtype = np.dtype(dtype_str)
tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype)
return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna)
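Illustrative usage (not from the source): because the values are viewed as timedeltas relative to the epoch, the result comes back as a Timedelta.
import pandas as pd

dti = pd.date_range("2021-01-01", periods=4, freq="D")
print(dti.std())  # a Timedelta of roughly 1 day 7 hours (std of 0..3 days with ddof=1)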
The provided code snippet includes necessary dependencies for implementing the `_new_DatetimeIndex` function. Write a Python function `def _new_DatetimeIndex(cls, d)` to solve the following problem:
This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__
Here is the function:
def _new_DatetimeIndex(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
if "data" in d and not isinstance(d["data"], DatetimeIndex):
# Avoid need to verify integrity by calling simple_new directly
data = d.pop("data")
if not isinstance(data, DatetimeArray):
# For backward compat with older pickles, we may need to construct
# a DatetimeArray to adapt to the newer _simple_new signature
tz = d.pop("tz")
freq = d.pop("freq")
dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
else:
dta = data
for key in ["tz", "freq"]:
# These are already stored in our DatetimeArray; if they are
# also in the pickle and don't match, we have a problem.
if key in d:
assert d[key] == getattr(dta, key)
d.pop(key)
result = cls._simple_new(dta, **d)
else:
with warnings.catch_warnings():
# TODO: If we knew what was going in to **d, we might be able to
# go through _simple_new instead
warnings.simplefilter("ignore")
result = cls.__new__(cls, **d)
return result | This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__ |
173,183 | from __future__ import annotations
import datetime as dt
import operator
from typing import (
TYPE_CHECKING,
Hashable,
)
import warnings
import numpy as np
import pytz
from pandas._libs import (
NaT,
Period,
Timestamp,
index as libindex,
lib,
)
from pandas._libs.tslibs import (
Resolution,
periods_per_day,
timezones,
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import (
Dtype,
DtypeObj,
Frequency,
IntervalClosedType,
TimeAmbiguous,
TimeNonexistent,
npt,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.datetimes import (
DatetimeArray,
tz_to_dtype,
)
import pandas.core.common as com
from pandas.core.indexes.base import (
Index,
maybe_extract_name,
)
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
class DatetimeIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray-like of datetime64 data.
Represented internally as int64, and which can be boxed to Timestamp objects
that are subclasses of datetime and carry metadata.
.. versionchanged:: 2.0.0
The various numeric date/time attributes (:attr:`~DatetimeIndex.day`,
:attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype
``int32``. Previously they had dtype ``int64``.
Parameters
----------
data : array-like (1-dimensional)
Datetime-like data to construct index with.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
Set the Timezone of the data.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
closed : {'left', 'right'}, optional
Set whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from 03:00
DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
dayfirst : bool, default False
If True, parse dates in `data` with the day first order.
yearfirst : bool, default False
If True parse dates in `data` with the year first order.
dtype : numpy.dtype or DatetimeTZDtype or str, default None
Note that the only NumPy dtype allowed is ``datetime64[ns]``.
copy : bool, default False
Make a copy of input ndarray.
name : label, default None
Name to be stored in the index.
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
timetz
dayofyear
day_of_year
weekofyear
week
dayofweek
day_of_week
weekday
quarter
tz
freq
freqstr
is_month_start
is_month_end
is_quarter_start
is_quarter_end
is_year_start
is_year_end
is_leap_year
inferred_freq
Methods
-------
normalize
strftime
snap
tz_convert
tz_localize
round
floor
ceil
to_period
to_pydatetime
to_series
to_frame
month_name
day_name
mean
std
See Also
--------
Index : The base pandas Index type.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
to_datetime : Convert argument to datetime.
date_range : Create a fixed-frequency DatetimeIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "datetimeindex"
_data_cls = DatetimeArray
_supports_partial_string_indexing = True
@property
def _engine_type(self) -> type[libindex.DatetimeEngine]:
return libindex.DatetimeEngine
_data: DatetimeArray
tz: dt.tzinfo | None
# --------------------------------------------------------------------
# methods that dispatch to DatetimeArray and wrap result
def strftime(self, date_format) -> Index:
arr = self._data.strftime(date_format)
return Index(arr, name=self.name, dtype=object)
def tz_convert(self, tz) -> DatetimeIndex:
arr = self._data.tz_convert(tz)
return type(self)._simple_new(arr, name=self.name, refs=self._references)
def tz_localize(
self,
tz,
ambiguous: TimeAmbiguous = "raise",
nonexistent: TimeNonexistent = "raise",
) -> DatetimeIndex:
arr = self._data.tz_localize(tz, ambiguous, nonexistent)
return type(self)._simple_new(arr, name=self.name)
def to_period(self, freq=None) -> PeriodIndex:
from pandas.core.indexes.api import PeriodIndex
arr = self._data.to_period(freq)
return PeriodIndex._simple_new(arr, name=self.name)
def to_julian_date(self) -> Index:
arr = self._data.to_julian_date()
return Index._simple_new(arr, name=self.name)
def isocalendar(self) -> DataFrame:
df = self._data.isocalendar()
return df.set_index(self)
@cache_readonly
def _resolution_obj(self) -> Resolution:
return self._data._resolution_obj
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
freq: Frequency | lib.NoDefault = lib.no_default,
tz=lib.no_default,
normalize: bool = False,
closed=None,
ambiguous: TimeAmbiguous = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> DatetimeIndex:
if is_scalar(data):
cls._raise_scalar_data_error(data)
# - Cases checked above all return/raise before reaching here - #
name = maybe_extract_name(name, data, cls)
if (
isinstance(data, DatetimeArray)
and freq is lib.no_default
and tz is lib.no_default
and dtype is None
):
# fastpath, similar logic in TimedeltaIndex.__new__;
# Note in this particular case we retain non-nano.
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
dtarr = DatetimeArray._from_sequence_not_strict(
data,
dtype=dtype,
copy=copy,
tz=tz,
freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
refs = None
if not copy and isinstance(data, (Index, ABCSeries)):
refs = data._references
subarr = cls._simple_new(dtarr, name=name, refs=refs)
return subarr
# --------------------------------------------------------------------
@cache_readonly
def _is_dates_only(self) -> bool:
"""
Return a boolean if we are only dates (and don't have a timezone)
Returns
-------
bool
"""
from pandas.io.formats.format import is_dates_only
# error: Argument 1 to "is_dates_only" has incompatible type
# "Union[ExtensionArray, ndarray]"; expected "Union[ndarray,
# DatetimeArray, Index, DatetimeIndex]"
return self.tz is None and is_dates_only(self._values) # type: ignore[arg-type]
def __reduce__(self):
d = {"data": self._data, "name": self.name}
return _new_DatetimeIndex, (type(self), d), None
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if self.tz is not None:
# If we have tz, we can compare to tzaware
return is_datetime64tz_dtype(dtype)
# if we dont have tz, we can only compare to tznaive
return is_datetime64_dtype(dtype)
# --------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
from pandas.io.formats.format import get_format_datetime64
formatter = get_format_datetime64(is_dates_only_=self._is_dates_only)
return lambda x: f"'{formatter(x)}'"
# --------------------------------------------------------------------
# Set Operation Methods
def _can_range_setop(self, other) -> bool:
# GH 46702: If self or other have non-UTC tzs, DST transitions prevent
# range representation due to no singular step
if (
self.tz is not None
and not timezones.is_utc(self.tz)
and not timezones.is_fixed_offset(self.tz)
):
return False
if (
other.tz is not None
and not timezones.is_utc(other.tz)
and not timezones.is_fixed_offset(other.tz)
):
return False
return super()._can_range_setop(other)
# --------------------------------------------------------------------
def _get_time_micros(self) -> npt.NDArray[np.int64]:
"""
Return the number of microseconds since midnight.
Returns
-------
ndarray[int64_t]
"""
values = self._data._local_timestamps()
ppd = periods_per_day(self._data._creso)
frac = values % ppd
if self.unit == "ns":
micros = frac // 1000
elif self.unit == "us":
micros = frac
elif self.unit == "ms":
micros = frac * 1000
elif self.unit == "s":
micros = frac * 1_000_000
else: # pragma: no cover
raise NotImplementedError(self.unit)
micros[self._isnan] = -1
return micros
def snap(self, freq: Frequency = "S") -> DatetimeIndex:
"""
Snap time stamps to nearest occurring frequency.
Returns
-------
DatetimeIndex
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
dta = self._data.copy()
for i, v in enumerate(self):
s = v
if not freq.is_on_offset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
dta[i] = s
return DatetimeIndex._simple_new(dta, name=self.name)
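Illustrative usage (not from the source), assuming a month-start frequency: each timestamp is moved to whichever boundary (rollback or rollforward) is closer.
import pandas as pd

dti = pd.DatetimeIndex(["2023-01-10", "2023-01-25"])
print(dti.snap("MS"))
# DatetimeIndex(['2023-01-01', '2023-02-01'], dtype='datetime64[ns]', ...)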
# --------------------------------------------------------------------
# Indexing Methods
def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
per = Period(parsed, freq=reso.attr_abbrev)
start, end = per.start_time, per.end_time
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
# timezone
start = start.tz_localize(parsed.tzinfo)
end = end.tz_localize(parsed.tzinfo)
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
"The index must be timezone aware when indexing "
"with a date string with a UTC offset"
)
# The flipped case with parsed.tz is None and self.tz is not None
# is ruled out bc parsed and reso are produced by _parse_with_reso,
# which localizes parsed.
return start, end
def _parse_with_reso(self, label: str):
parsed, reso = super()._parse_with_reso(label)
parsed = Timestamp(parsed)
if self.tz is not None and parsed.tzinfo is None:
# we special-case timezone-naive strings and timezone-aware
# DatetimeIndex
# https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081
parsed = parsed.tz_localize(self.tz)
return parsed, reso
def _disallow_mismatched_indexing(self, key) -> None:
"""
Check for mismatched-tzawareness indexing and re-raise as KeyError.
"""
# we get here with isinstance(key, self._data._recognized_scalars)
try:
# GH#36148
self._data._assert_tzawareness_compat(key)
except TypeError as err:
raise KeyError(key) from err
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
self._check_indexing_error(key)
orig_key = key
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
if isinstance(key, self._data._recognized_scalars):
# needed to localize naive datetimes
self._disallow_mismatched_indexing(key)
key = Timestamp(key)
elif isinstance(key, str):
try:
parsed, reso = self._parse_with_reso(key)
except (ValueError, pytz.NonExistentTimeError) as err:
raise KeyError(key) from err
self._disallow_mismatched_indexing(parsed)
if self._can_partial_date_slice(reso):
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
key = parsed
elif isinstance(key, dt.timedelta):
# GH#20464
raise TypeError(
f"Cannot index {type(self).__name__} with {type(key).__name__}"
)
elif isinstance(key, dt.time):
return self.indexer_at_time(key)
else:
# unrecognized type
raise KeyError(key)
try:
return Index.get_loc(self, key)
except KeyError as err:
raise KeyError(orig_key) from err
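Illustrative usage (not from the source), assuming a daily index: recognized scalars and parseable strings both resolve to integer positions.
import pandas as pd

dti = pd.date_range("2021-01-01", periods=5, freq="D")
print(dti.get_loc("2021-01-03"))                # 2
print(dti.get_loc(pd.Timestamp("2021-01-05")))  # 4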
def _maybe_cast_slice_bound(self, label, side: str):
# GH#42855 handle date here instead of get_slice_bound
if isinstance(label, dt.date) and not isinstance(label, dt.datetime):
# Pandas supports slicing with dates, treated as datetimes at midnight.
# https://github.com/pandas-dev/pandas/issues/31501
label = Timestamp(label).to_pydatetime()
label = super()._maybe_cast_slice_bound(label, side)
self._data._assert_tzawareness_compat(label)
return Timestamp(label)
def slice_indexer(self, start=None, end=None, step=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
# an array of (self.hour, self.minute, self.seconds, self.microsecond).
if isinstance(start, dt.time) and isinstance(end, dt.time):
if step is not None and step != 1:
raise ValueError("Must have step size of 1 with time slices")
return self.indexer_between_time(start, end)
if isinstance(start, dt.time) or isinstance(end, dt.time):
raise KeyError("Cannot mix time and non-time slice keys")
def check_str_or_none(point) -> bool:
return point is not None and not isinstance(point, str)
# GH#33146 if start and end are combinations of str and None and Index is not
# monotonic, we can not use Index.slice_indexer because it does not honor the
# actual elements, is only searching for start and end
if (
check_str_or_none(start)
or check_str_or_none(end)
or self.is_monotonic_increasing
):
return Index.slice_indexer(self, start, end, step)
mask = np.array(True)
raise_mask = np.array(True)
if start is not None:
start_casted = self._maybe_cast_slice_bound(start, "left")
mask = start_casted <= self
raise_mask = start_casted == self
if end is not None:
end_casted = self._maybe_cast_slice_bound(end, "right")
mask = (self <= end_casted) & mask
raise_mask = (end_casted == self) | raise_mask
if not raise_mask.any():
raise KeyError(
"Value based partial slicing on non-monotonic DatetimeIndexes "
"with non-existing keys is not allowed.",
)
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
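Illustrative usage (not from the source): slicing with two datetime.time objects dispatches to indexer_between_time, as described in the docstring above.
import datetime as dt
import pandas as pd

s = pd.Series(range(6), index=pd.date_range("2021-01-01", periods=6, freq="4H"))
print(s[dt.time(8, 0):dt.time(16, 0)])  # should select the 08:00, 12:00 and 16:00 rows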
# --------------------------------------------------------------------
@cache_readonly
def inferred_type(self) -> str:
# b/c datetime is represented as microseconds since the epoch, make
# sure we can't have ambiguous indexing
return "datetime64"
def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:
"""
Return index locations of values at particular time of day.
Parameters
----------
time : datetime.time or str
Time passed in either as object (datetime.time) or as string in
appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
Returns
-------
np.ndarray[np.intp]
See Also
--------
indexer_between_time : Get index locations of values between particular
times of day.
DataFrame.at_time : Select values at particular time of day.
"""
if asof:
raise NotImplementedError("'asof' argument is not supported")
if isinstance(time, str):
from dateutil.parser import parse
time = parse(time).time()
if time.tzinfo:
if self.tz is None:
raise ValueError("Index must be timezone aware.")
time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
else:
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (time_micros == micros).nonzero()[0]
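Illustrative usage (not from the source): positions of all entries falling exactly at a given wall-clock time.
import pandas as pd

dti = pd.date_range("2021-01-01 09:00", periods=6, freq="30T")
print(dti.indexer_at_time("09:30"))  # [1]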
def indexer_between_time(
self, start_time, end_time, include_start: bool = True, include_end: bool = True
) -> npt.NDArray[np.intp]:
"""
Return index locations of values between particular times of day.
Parameters
----------
start_time, end_time : datetime.time, str
Time passed either as object (datetime.time) or as string in
appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
include_start : bool, default True
include_end : bool, default True
Returns
-------
np.ndarray[np.intp]
See Also
--------
indexer_at_time : Get index locations of values at particular time of day.
DataFrame.between_time : Select values between particular times of day.
"""
start_time = to_time(start_time)
end_time = to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
return mask.nonzero()[0]
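Illustrative usage (not from the source): include_start/include_end map to the <=/< operators chosen above.
import pandas as pd

dti = pd.date_range("2021-01-01 09:00", periods=6, freq="30T")
print(dti.indexer_between_time("09:30", "10:30"))                     # [1 2 3]
print(dti.indexer_between_time("09:30", "10:30", include_end=False))  # [1 2]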
def date_range(
start=None,
end=None,
periods=None,
freq=None,
tz=None,
normalize: bool = False,
name: Hashable = None,
inclusive: IntervalClosedType = "both",
*,
unit: str | None = None,
**kwargs,
) -> DatetimeIndex:
"""
Return a fixed frequency DatetimeIndex.
Returns the range of equally spaced time points (where the difference between any
two adjacent points is specified by the given frequency) such that they all
satisfy `start <[=] x <[=] end`, where the first one and the last one are, respectively,
the first and last time points in that range that fall on the boundary of ``freq``
(if given as a frequency string) or that are valid for ``freq`` (if given as a
:class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``,
``end``, or ``freq`` is *not* specified, this missing parameter can be computed
given ``periods``, the number of timesteps in the range. See the note below.)
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str, datetime.timedelta, or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
:ref:`here <timeseries.offset_aliases>` for a list of
frequency aliases.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive unless timezone-aware datetime-likes are passed.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
inclusive : {"both", "neither", "left", "right"}, default "both"
Include boundaries; whether to set each bound as closed or open.
.. versionadded:: 1.4.0
unit : str, default None
Specify the desired resolution of the result.
.. versionadded:: 2.0.0
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
timedelta_range : Return a fixed frequency TimedeltaIndex.
period_range : Return a fixed frequency PeriodIndex.
interval_range : Return a fixed frequency IntervalIndex.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> pd.date_range(start='1/1/2018', end='1/08/2018')
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify timezone-aware `start` and `end`, with the default daily frequency.
>>> pd.date_range(
... start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"),
... end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"),
... )
DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00',
'2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00',
'2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00',
'2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='D')
Specify `start` and `periods`, the number of periods (days).
>>> pd.date_range(start='1/1/2018', periods=8)
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `end` and `periods`, the number of periods (days).
>>> pd.date_range(end='1/1/2018', periods=8)
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> pd.date_range(start='1/1/2018', periods=5, freq='M')
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq='M')
Multiples are allowed
>>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
`freq` can also be specified as an Offset object.
>>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
Specify `tz` to set the timezone.
>>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
'2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
'2018-01-05 00:00:00+09:00'],
dtype='datetime64[ns, Asia/Tokyo]', freq='D')
`inclusive` controls whether to include `start` and `end` that are on the
boundary. The default, "both", includes boundary points on either end.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both")
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
Use ``inclusive='left'`` to exclude `end` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left')
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
dtype='datetime64[ns]', freq='D')
Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and
similarly ``inclusive='neither'`` will exclude both `start` and `end`.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right')
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
**Specify a unit**
>>> pd.date_range(start="2017-01-01", periods=10, freq="100AS", unit="s")
DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01',
'2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01',
'2817-01-01', '2917-01-01'],
dtype='datetime64[s]', freq='100AS-JAN')
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
dtarr = DatetimeArray._generate_range(
start=start,
end=end,
periods=periods,
freq=freq,
tz=tz,
normalize=normalize,
inclusive=inclusive,
unit=unit,
**kwargs,
)
return DatetimeIndex._simple_new(dtarr, name=name)
class Hashable(Protocol, metaclass=ABCMeta):
# TODO: This is special, in that a subclass of a hashable class may not be hashable
# (for example, list vs. object). It's not obvious how to represent this. This class
# is currently mostly useless for static checking.
def __hash__(self) -> int: ...
Frequency = Union[str, "BaseOffset"]
IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]]
The provided code snippet includes necessary dependencies for implementing the `bdate_range` function. Write a Python function `def bdate_range( start=None, end=None, periods: int | None = None, freq: Frequency = "B", tz=None, normalize: bool = True, name: Hashable = None, weekmask=None, holidays=None, inclusive: IntervalClosedType = "both", **kwargs, ) -> DatetimeIndex` to solve the following problem:
Return a fixed frequency DatetimeIndex with business day as the default. Parameters ---------- start : str or datetime-like, default None Left bound for generating dates. end : str or datetime-like, default None Right bound for generating dates. periods : int, default None Number of periods to generate. freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B' Frequency strings can have multiples, e.g. '5H'. The default is business daily ('B'). tz : str or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. weekmask : str or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex Notes ----- Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. Specifying ``freq`` is a requirement for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not desired. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Note how the two weekend days are skipped in the result. >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-08'], dtype='datetime64[ns]', freq='B')
Here is the function:
def bdate_range(
start=None,
end=None,
periods: int | None = None,
freq: Frequency = "B",
tz=None,
normalize: bool = True,
name: Hashable = None,
weekmask=None,
holidays=None,
inclusive: IntervalClosedType = "both",
**kwargs,
) -> DatetimeIndex:
"""
Return a fixed frequency DatetimeIndex with business day as the default.
Parameters
----------
start : str or datetime-like, default None
Left bound for generating dates.
end : str or datetime-like, default None
Right bound for generating dates.
periods : int, default None
Number of periods to generate.
freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B'
Frequency strings can have multiples, e.g. '5H'. The default is
business daily ('B').
tz : str or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
weekmask : str or None, default None
Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
only used when custom frequency strings are passed. The default
value None is equivalent to 'Mon Tue Wed Thu Fri'.
holidays : list-like or None, default None
Dates to exclude from the set of valid business days, passed to
``numpy.busdaycalendar``, only used when custom frequency strings
are passed.
inclusive : {"both", "neither", "left", "right"}, default "both"
Include boundaries; Whether to set each bound as closed or open.
.. versionadded:: 1.4.0
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
DatetimeIndex
Notes
-----
Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. Specifying ``freq`` is a requirement
for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not
desired.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Note how the two weekend days are skipped in the result.
>>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-08'],
dtype='datetime64[ns]', freq='B')
"""
if freq is None:
msg = "freq must be specified for bdate_range; use date_range instead"
raise TypeError(msg)
if isinstance(freq, str) and freq.startswith("C"):
try:
weekmask = weekmask or "Mon Tue Wed Thu Fri"
freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
except (KeyError, TypeError) as err:
msg = f"invalid custom frequency string: {freq}"
raise ValueError(msg) from err
elif holidays or weekmask:
msg = (
"a custom frequency string is required when holidays or "
f"weekmask are passed, got frequency {freq}"
)
raise ValueError(msg)
return date_range(
start=start,
end=end,
periods=periods,
freq=freq,
tz=tz,
normalize=normalize,
name=name,
inclusive=inclusive,
**kwargs,
) | Return a fixed frequency DatetimeIndex with business day as the default. Parameters ---------- start : str or datetime-like, default None Left bound for generating dates. end : str or datetime-like, default None Right bound for generating dates. periods : int, default None Number of periods to generate. freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B' Frequency strings can have multiples, e.g. '5H'. The default is business daily ('B'). tz : str or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. weekmask : str or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex Notes ----- Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. Specifying ``freq`` is a requirement for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not desired. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Note how the two weekend days are skipped in the result. >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-08'], dtype='datetime64[ns]', freq='B') |
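A brief usage sketch of the custom business-day path described above (illustrative inputs, not part of the original row); passing ``weekmask`` or ``holidays`` requires a frequency string starting with 'C', and the exact repr can vary by pandas version.
>>> pd.bdate_range(start='2018-01-01', end='2018-01-12', freq='C', weekmask='Mon Wed Fri')
DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05', '2018-01-08',
               '2018-01-10', '2018-01-12'],
              dtype='datetime64[ns]', freq='C')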
173,184 | from __future__ import annotations
import datetime as dt
import operator
from typing import (
TYPE_CHECKING,
Hashable,
)
import warnings
import numpy as np
import pytz
from pandas._libs import (
NaT,
Period,
Timestamp,
index as libindex,
lib,
)
from pandas._libs.tslibs import (
Resolution,
periods_per_day,
timezones,
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import (
Dtype,
DtypeObj,
Frequency,
IntervalClosedType,
TimeAmbiguous,
TimeNonexistent,
npt,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.datetimes import (
DatetimeArray,
tz_to_dtype,
)
import pandas.core.common as com
from pandas.core.indexes.base import (
Index,
maybe_extract_name,
)
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
def _time_to_micros(time_obj: dt.time) -> int:
seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
return 1_000_000 * seconds + time_obj.microsecond | null |
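A quick worked example of the conversion above, with hypothetical input values: for ``dt.time(1, 2, 3, 4)`` the seconds part is 1*3600 + 2*60 + 3 = 3723, so the result is 3723 * 1_000_000 + 4 microseconds.
>>> _time_to_micros(dt.time(1, 2, 3, 4))
3723000004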
173,185 | from __future__ import annotations
import numpy as np
from pandas._typing import AxisInt
AxisInt = int
def shift(values: np.ndarray, periods: int, axis: AxisInt, fill_value) -> np.ndarray:
new_values = values
if periods == 0 or values.size == 0:
return new_values.copy()
# make sure array sent to np.roll is c_contiguous
f_ordered = values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if new_values.size:
new_values = np.roll(
new_values,
np.intp(periods),
axis=axis,
)
axis_indexer = [slice(None)] * values.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return new_values | null |
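A minimal sketch of how the roll-and-fill logic above behaves on a 1-D array (illustrative values): after the roll, the first ``periods`` slots are overwritten with ``fill_value``.
>>> shift(np.array([1, 2, 3, 4, 5]), periods=2, axis=0, fill_value=0)
array([0, 0, 1, 2, 3])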
173,186 | from __future__ import annotations
import functools
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import numpy as np
from pandas._libs import (
algos as libalgos,
lib,
)
from pandas._typing import (
ArrayLike,
AxisInt,
npt,
)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_1d_only_ea_obj,
)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
def _get_take_nd_function(
ndim: int,
arr_dtype: np.dtype,
out_dtype: np.dtype,
axis: AxisInt = 0,
mask_info=None,
):
"""
Get the appropriate "take" implementation for the given dimension, axis
and dtypes.
"""
func = None
if ndim <= 2:
# for this part we don't need `mask_info` -> use the cached algo lookup
func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
if func is None:
def func(arr, indexer, out, fill_value=np.nan) -> None:
indexer = ensure_platform_int(indexer)
_take_nd_object(
arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
)
return func
def _take_preprocess_indexer_and_fill_value(
arr: np.ndarray,
indexer: npt.NDArray[np.intp],
fill_value,
allow_fill: bool,
mask: npt.NDArray[np.bool_] | None = None,
):
mask_info: tuple[np.ndarray | None, bool] | None = None
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype:
# check if promotion is actually required based on indexer
if mask is not None:
needs_masking = True
else:
mask = indexer == -1
needs_masking = bool(mask.any())
mask_info = mask, needs_masking
if not needs_masking:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
return dtype, fill_value, mask_info
ArrayLike = Union["ExtensionArray", np.ndarray]
The provided code snippet includes necessary dependencies for implementing the `take_1d` function. Write a Python function `def take_1d( arr: ArrayLike, indexer: npt.NDArray[np.intp], fill_value=None, allow_fill: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> ArrayLike` to solve the following problem:
Specialized version for 1D arrays. Differences compared to `take_nd`: - Assumes input array has already been converted to numpy array / EA - Assumes indexer is already guaranteed to be intp dtype ndarray - Only works for 1D arrays To ensure the lowest possible overhead. Note: similarly to `take_nd`, this function assumes that the indexer is a valid(ated) indexer with no out of bound indices. Parameters ---------- arr : np.ndarray or ExtensionArray Input array. indexer : ndarray 1-D array of indices to take (validated indices, intp dtype). fill_value : any, default np.nan Fill value to replace -1 values with allow_fill : bool, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. mask : np.ndarray, optional, default None If `allow_fill` is True, and the mask (where indexer == -1) is already known, it can be passed to avoid recomputation.
Here is the function:
def take_1d(
arr: ArrayLike,
indexer: npt.NDArray[np.intp],
fill_value=None,
allow_fill: bool = True,
mask: npt.NDArray[np.bool_] | None = None,
) -> ArrayLike:
"""
Specialized version for 1D arrays. Differences compared to `take_nd`:
- Assumes input array has already been converted to numpy array / EA
- Assumes indexer is already guaranteed to be intp dtype ndarray
- Only works for 1D arrays
To ensure the lowest possible overhead.
Note: similarly to `take_nd`, this function assumes that the indexer is
a valid(ated) indexer with no out of bound indices.
Parameters
----------
arr : np.ndarray or ExtensionArray
Input array.
indexer : ndarray
1-D array of indices to take (validated indices, intp dtype).
fill_value : any, default np.nan
Fill value to replace -1 values with
allow_fill : bool, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
mask : np.ndarray, optional, default None
If `allow_fill` is True, and the mask (where indexer == -1) is already
known, it can be passed to avoid recomputation.
"""
if not isinstance(arr, np.ndarray):
# ExtensionArray -> dispatch to their method
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if not allow_fill:
return arr.take(indexer)
dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
arr, indexer, fill_value, True, mask
)
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
out = np.empty(indexer.shape, dtype=dtype)
func = _get_take_nd_function(
arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info
)
func(arr, indexer, out, fill_value)
return out | Specialized version for 1D arrays. Differences compared to `take_nd`: - Assumes input array has already been converted to numpy array / EA - Assumes indexer is already guaranteed to be intp dtype ndarray - Only works for 1D arrays To ensure the lowest possible overhead. Note: similarly to `take_nd`, this function assumes that the indexer is a valid(ated) indexer with no out of bound indices. Parameters ---------- arr : np.ndarray or ExtensionArray Input array. indexer : ndarray 1-D array of indices to take (validated indices, intp dtype). fill_value : any, default np.nan Fill value to replace -1 values with allow_fill : bool, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. mask : np.ndarray, optional, default None If `allow_fill` is True, and the mask (where indexer == -1) is already known, it can be passed to avoid recomputation. |
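An illustrative call, assuming a float input array so that the -1 positions can be filled with NaN under the default ``fill_value``:
>>> take_1d(np.array([10.0, 20.0, 30.0]), np.array([0, 2, -1], dtype=np.intp))
array([10., 30., nan])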
173,187 | from __future__ import annotations
import functools
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import numpy as np
from pandas._libs import (
algos as libalgos,
lib,
)
from pandas._typing import (
ArrayLike,
AxisInt,
npt,
)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_1d_only_ea_obj,
)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
def _convert_wrapper(f, conv_dtype):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
) -> None:
if conv_dtype == object:
# GH#39755 avoid casting dt64/td64 to integers
arr = ensure_wrapped_if_datetimelike(arr)
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
_take_2d_multi_dict = {
("int8", "int8"): libalgos.take_2d_multi_int8_int8,
("int8", "int32"): libalgos.take_2d_multi_int8_int32,
("int8", "int64"): libalgos.take_2d_multi_int8_int64,
("int8", "float64"): libalgos.take_2d_multi_int8_float64,
("int16", "int16"): libalgos.take_2d_multi_int16_int16,
("int16", "int32"): libalgos.take_2d_multi_int16_int32,
("int16", "int64"): libalgos.take_2d_multi_int16_int64,
("int16", "float64"): libalgos.take_2d_multi_int16_float64,
("int32", "int32"): libalgos.take_2d_multi_int32_int32,
("int32", "int64"): libalgos.take_2d_multi_int32_int64,
("int32", "float64"): libalgos.take_2d_multi_int32_float64,
("int64", "int64"): libalgos.take_2d_multi_int64_int64,
("int64", "float64"): libalgos.take_2d_multi_int64_float64,
("float32", "float32"): libalgos.take_2d_multi_float32_float32,
("float32", "float64"): libalgos.take_2d_multi_float32_float64,
("float64", "float64"): libalgos.take_2d_multi_float64_float64,
("object", "object"): libalgos.take_2d_multi_object_object,
("bool", "bool"): _view_wrapper(
libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
),
("bool", "object"): _view_wrapper(
libalgos.take_2d_multi_bool_object, np.uint8, None
),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
def _take_2d_multi_object(
arr: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value,
mask_info,
) -> None:
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer # both np.intp
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i, u_ in enumerate(row_idx):
if u_ != -1:
for j, v in enumerate(col_idx):
if v != -1:
out[i, j] = arr[u_, v]
def maybe_promote(dtype: np.dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
Raises
------
ValueError
If fill_value is a non-scalar and dtype is not object.
"""
orig = fill_value
if checknull(fill_value):
# https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740
# avoid cache misses with NaN/NaT values that are not singletons
fill_value = _canonical_nans.get(type(fill_value), fill_value)
# for performance, we are using a cached version of the actual implementation
# of the function in _maybe_promote. However, this doesn't always work (in case
# of non-hashable arguments), so we fallback to the actual implementation if needed
try:
# error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type
# "Type[Any]"; expected "Hashable" [arg-type]
dtype, fill_value = _maybe_promote_cached(
dtype, fill_value, type(fill_value) # type: ignore[arg-type]
)
except TypeError:
# if fill_value is not hashable (required for caching)
dtype, fill_value = _maybe_promote(dtype, fill_value)
if dtype == _dtype_obj and orig is not None:
# GH#51592 restore our potentially non-canonical fill_value
fill_value = orig
return dtype, fill_value
ensure_platform_int = algos.ensure_platform_int
The provided code snippet includes necessary dependencies for implementing the `take_2d_multi` function. Write a Python function `def take_2d_multi( arr: np.ndarray, indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], fill_value=np.nan, ) -> np.ndarray` to solve the following problem:
Specialized Cython take which sets NaN values in one pass.
Here is the function:
def take_2d_multi(
arr: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
fill_value=np.nan,
) -> np.ndarray:
"""
Specialized Cython take which sets NaN values in one pass.
"""
# This is only called from one place in DataFrame._reindex_multi,
# so we know indexer is well-behaved.
assert indexer is not None
assert indexer[0] is not None
assert indexer[1] is not None
row_idx, col_idx = indexer
row_idx = ensure_platform_int(row_idx)
col_idx = ensure_platform_int(col_idx)
indexer = row_idx, col_idx
mask_info = None
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype:
# check if promotion is actually required based on indexer
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if not (row_needs or col_needs):
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is not None:
func(arr, indexer, out=out, fill_value=fill_value)
else:
# test_reindex_multi
_take_2d_multi_object(
arr, indexer, out, fill_value=fill_value, mask_info=mask_info
)
return out | Specialized Cython take which sets NaN values in one pass. |
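A small sketch of the row/column take above (hypothetical inputs); -1 entries in either indexer are filled, which forces the integer input to be promoted to float64:
>>> arr = np.arange(6).reshape(3, 2)
>>> take_2d_multi(arr, (np.array([0, 2], dtype=np.intp), np.array([1, -1], dtype=np.intp)))
array([[ 1., nan],
       [ 5., nan]])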
173,188 | from __future__ import annotations
import functools
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import numpy as np
from pandas._libs import (
algos as libalgos,
lib,
)
from pandas._typing import (
ArrayLike,
AxisInt,
npt,
)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_1d_only_ea_obj,
)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
) -> None:
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
# FIXME: if we get here with dt64/td64 we need to be sure we have
# matching resos
if fill_value.dtype.kind == "m":
fill_value = fill_value.astype("m8[ns]")
else:
fill_value = fill_value.astype("M8[ns]")
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper | null |
173,189 | from __future__ import annotations
import operator
import re
from typing import (
Any,
Pattern,
)
import numpy as np
from pandas._typing import (
ArrayLike,
Scalar,
npt,
)
from pandas.core.dtypes.common import (
is_re,
is_re_compilable,
is_scalar,
)
from pandas.core.dtypes.missing import isna
def should_use_regex(regex: bool, to_replace: Any) -> bool:
"""
Decide whether to treat `to_replace` as a regular expression.
"""
if is_re(to_replace):
regex = True
regex = regex and is_re_compilable(to_replace)
# Don't use regex if the pattern is empty.
regex = regex and re.compile(to_replace).pattern != ""
return regex
class Pattern(Generic[AnyStr]):
flags: int
groupindex: Mapping[str, int]
groups: int
pattern: AnyStr
def search(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ...
def match(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ...
# New in Python 3.4
def fullmatch(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ...
def split(self, string: AnyStr, maxsplit: int = ...) -> list[AnyStr]: ...
def findall(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> list[Any]: ...
def finditer(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Iterator[Match[AnyStr]]: ...
def sub(self, repl: AnyStr, string: AnyStr, count: int = ...) -> AnyStr: ...
def sub(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int = ...) -> AnyStr: ...
def subn(self, repl: AnyStr, string: AnyStr, count: int = ...) -> Tuple[AnyStr, int]: ...
def subn(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int = ...) -> Tuple[AnyStr, int]: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
ArrayLike = Union["ExtensionArray", np.ndarray]
Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime]
def isna(obj: Scalar) -> bool:
...
def isna(
obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
...
def isna(obj: NDFrameT) -> NDFrameT:
...
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
The provided code snippet includes necessary dependencies for implementing the `compare_or_regex_search` function. Write a Python function `def compare_or_regex_search( a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_] ) -> ArrayLike` to solve the following problem:
Compare two array-like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. Parameters ---------- a : array-like b : scalar or regex pattern regex : bool mask : np.ndarray[bool] Returns ------- mask : array-like of bool
Here is the function:
def compare_or_regex_search(
a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
) -> ArrayLike:
"""
Compare two array-like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
a : array-like
b : scalar or regex pattern
regex : bool
mask : np.ndarray[bool]
Returns
-------
mask : array-like of bool
"""
if isna(b):
return ~mask
def _check_comparison_types(
result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
):
"""
Raises an error if the two arrays (a,b) cannot be compared.
Otherwise, returns the comparison result as expected.
"""
if is_scalar(result) and isinstance(a, np.ndarray):
type_names = [type(a).__name__, type(b).__name__]
type_names[0] = f"ndarray(dtype={a.dtype})"
raise TypeError(
f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
)
if not regex or not should_use_regex(regex, b):
# TODO: should use missing.mask_missing?
op = lambda x: operator.eq(x, b)
else:
op = np.vectorize(
lambda x: bool(re.search(b, x))
if isinstance(x, str) and isinstance(b, (str, Pattern))
else False
)
# GH#32621 use mask to avoid comparing to NAs
if isinstance(a, np.ndarray):
a = a[mask]
result = op(a)
if isinstance(result, np.ndarray) and mask is not None:
# The shape of the mask can differ to that of the result
# since we may compare only a subset of a's or b's elements
tmp = np.zeros(mask.shape, dtype=np.bool_)
np.place(tmp, mask, result)
result = tmp
_check_comparison_types(result, a, b)
return result | Compare two array-like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. Parameters ---------- a : array-like b : scalar or regex pattern regex : bool mask : np.ndarray[bool] Returns ------- mask : array-like of bool |
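A usage sketch assuming an object-dtype array and a plain pattern string; with ``regex=True`` each unmasked element is matched via ``re.search``:
>>> a = np.array(['ab', 'cd', 'ef'], dtype=object)
>>> compare_or_regex_search(a, 'c', regex=True, mask=np.array([True, True, True]))
array([False,  True, False])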
173,190 | from __future__ import annotations
import operator
import re
from typing import (
Any,
Pattern,
)
import numpy as np
from pandas._typing import (
ArrayLike,
Scalar,
npt,
)
from pandas.core.dtypes.common import (
is_re,
is_re_compilable,
is_scalar,
)
from pandas.core.dtypes.missing import isna
class Pattern(Generic[AnyStr]):
flags: int
groupindex: Mapping[str, int]
groups: int
pattern: AnyStr
def search(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ...
def match(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ...
# New in Python 3.4
def fullmatch(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ...
def split(self, string: AnyStr, maxsplit: int = ...) -> list[AnyStr]: ...
def findall(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> list[Any]: ...
def finditer(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Iterator[Match[AnyStr]]: ...
def sub(self, repl: AnyStr, string: AnyStr, count: int = ...) -> AnyStr: ...
def sub(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int = ...) -> AnyStr: ...
def subn(self, repl: AnyStr, string: AnyStr, count: int = ...) -> Tuple[AnyStr, int]: ...
def subn(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int = ...) -> Tuple[AnyStr, int]: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
ArrayLike = Union["ExtensionArray", np.ndarray]
def isna(obj: Scalar) -> bool:
...
def isna(
obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
...
def isna(obj: NDFrameT) -> NDFrameT:
...
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
The provided code snippet includes necessary dependencies for implementing the `replace_regex` function. Write a Python function `def replace_regex( values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None ) -> None` to solve the following problem:
Parameters ---------- values : ArrayLike Object dtype. rx : re.Pattern value : Any mask : np.ndarray[bool], optional Notes ----- Alters values in-place.
Here is the function:
def replace_regex(
values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
) -> None:
"""
Parameters
----------
values : ArrayLike
Object dtype.
rx : re.Pattern
value : Any
mask : np.ndarray[bool], optional
Notes
-----
Alters values in-place.
"""
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, str):
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return value if rx.search(s) is not None else s
else:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gets returned
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return rx.sub(value, s)
else:
return s
f = np.vectorize(re_replacer, otypes=[np.object_])
if mask is None:
values[:] = f(values)
else:
values[mask] = f(values[mask]) | Parameters ---------- values : ArrayLike Object dtype. rx : re.Pattern value : Any mask : np.ndarray[bool], optional Notes ----- Alters values in-place. |
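An illustrative in-place replacement with hypothetical values; because ``value`` is a string, matches are rewritten with ``rx.sub``:
>>> values = np.array(['foo', 'bar', 'baz'], dtype=object)
>>> replace_regex(values, re.compile('ba.'), 'qux', mask=None)
>>> values
array(['foo', 'qux', 'qux'], dtype=object)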
173,191 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import iNaT
from pandas.core.dtypes.missing import isna
def _cum_func(
func: Callable,
values: np.ndarray,
*,
skipna: bool = True,
):
def cummin(values: np.ndarray, *, skipna: bool = True):
return _cum_func(np.minimum.accumulate, values, skipna=skipna) | null |
173,192 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import iNaT
from pandas.core.dtypes.missing import isna
def _cum_func(
func: Callable,
values: np.ndarray,
*,
skipna: bool = True,
):
"""
Accumulations for 1D datetimelike arrays.
Parameters
----------
func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
        operation). Values is modified in place.
skipna : bool, default True
Whether to skip NA.
"""
try:
fill_value = {
np.maximum.accumulate: np.iinfo(np.int64).min,
np.cumsum: 0,
np.minimum.accumulate: np.iinfo(np.int64).max,
}[func]
except KeyError:
raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")
mask = isna(values)
y = values.view("i8")
y[mask] = fill_value
if not skipna:
mask = np.maximum.accumulate(mask)
result = func(y)
result[mask] = iNaT
if values.dtype.kind in ["m", "M"]:
return result.view(values.dtype.base)
return result
def cummax(values: np.ndarray, *, skipna: bool = True):
return _cum_func(np.maximum.accumulate, values, skipna=skipna) | null |
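A sketch of the accumulation above on datetime64 data (hypothetical values); note that the helper overwrites NaT slots of ``values`` in place before accumulating, while the returned array restores NaT at those positions:
vals = np.array(['2020-01-02', 'NaT', '2020-01-01'], dtype='datetime64[ns]')
cummax(vals, skipna=True)  # returns [2020-01-02, NaT, 2020-01-02] as datetime64[ns]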
173,193 | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
)
import numpy as np
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
npt,
)
from pandas.compat import np_version_under1p21
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import is_list_like
from pandas.core.arrays import ExtensionArray
Any = object()
ArrayLike = Union["ExtensionArray", np.ndarray]
The provided code snippet includes necessary dependencies for implementing the `putmask_inplace` function. Write a Python function `def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None` to solve the following problem:
ExtensionArray-compatible implementation of np.putmask. The main difference is we do not handle repeating or truncating like numpy. Parameters ---------- values: np.ndarray or ExtensionArray mask : np.ndarray[bool] We assume extract_bool_array has already been called. value : Any
Here is the function:
def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:
"""
ExtensionArray-compatible implementation of np.putmask. The main
difference is we do not handle repeating or truncating like numpy.
Parameters
----------
values: np.ndarray or ExtensionArray
mask : np.ndarray[bool]
We assume extract_bool_array has already been called.
value : Any
"""
if (
not isinstance(values, np.ndarray)
or (values.dtype == object and not lib.is_scalar(value))
# GH#43424: np.putmask raises TypeError if we cannot cast between types with
# rule = "safe", a stricter guarantee we may not have here
or (
isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)
)
):
# GH#19266 using np.putmask gives unexpected results with listlike value
# along with object dtype
if is_list_like(value) and len(value) == len(values):
values[mask] = value[mask]
else:
values[mask] = value
else:
# GH#37833 np.putmask is more performant than __setitem__
np.putmask(values, mask, value) | ExtensionArray-compatible implementation of np.putmask. The main difference is we do not handle repeating or truncating like numpy. Parameters ---------- values: np.ndarray or ExtensionArray mask : np.ndarray[bool] We assume extract_bool_array has already been called. value : Any |
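A minimal example of the in-place masked assignment above (illustrative values):
>>> values = np.array([1.0, 2.0, 3.0, 4.0])
>>> putmask_inplace(values, np.array([True, False, True, False]), 0.0)
>>> values
array([0., 2., 0., 4.])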
173,194 | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
)
import numpy as np
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
npt,
)
from pandas.compat import np_version_under1p21
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import is_list_like
from pandas.core.arrays import ExtensionArray
def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):
"""
Parameters
----------
values : np.ndarray
num_set : int
For putmask, this is mask.sum()
other : Any
"""
if values.dtype == object:
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if isinstance(dtype, np.dtype) and dtype.kind in ["m", "M"]:
# https://github.com/numpy/numpy/issues/12550
# timedelta64 will incorrectly cast to int
if not is_list_like(other):
other = [other] * num_set
else:
other = list(other)
return other
Any = object()
The provided code snippet includes necessary dependencies for implementing the `putmask_without_repeat` function. Write a Python function `def putmask_without_repeat( values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any ) -> None` to solve the following problem:
np.putmask will truncate or repeat if `new` is a listlike with len(new) != len(values). We require an exact match. Parameters ---------- values : np.ndarray mask : np.ndarray[bool] new : Any
Here is the function:
def putmask_without_repeat(
values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any
) -> None:
"""
np.putmask will truncate or repeat if `new` is a listlike with
len(new) != len(values). We require an exact match.
Parameters
----------
values : np.ndarray
mask : np.ndarray[bool]
new : Any
"""
if np_version_under1p21:
new = setitem_datetimelike_compat(values, mask.sum(), new)
if getattr(new, "ndim", 0) >= 1:
new = new.astype(values.dtype, copy=False)
# TODO: this prob needs some better checking for 2D cases
nlocs = mask.sum()
if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
shape = np.shape(new)
# np.shape compat for if setitem_datetimelike_compat
# changed arraylike to list e.g. test_where_dt64_2d
if nlocs == shape[-1]:
# GH#30567
            # If the length of ``new`` is less than the length of ``values``,
            # `np.putmask` would first repeat the ``new`` array and then
            # assign the masked values, producing an incorrect result.
            # `np.place`, on the other hand, uses the ``new`` values as-is
            # to place them in the masked locations of ``values``.
np.place(values, mask, new)
# i.e. values[mask] = new
elif mask.shape[-1] == shape[-1] or shape[-1] == 1:
np.putmask(values, mask, new)
else:
raise ValueError("cannot assign mismatch length to masked array")
else:
np.putmask(values, mask, new) | np.putmask will truncate or repeat if `new` is a listlike with len(new) != len(values). We require an exact match. Parameters ---------- values : np.ndarray mask : np.ndarray[bool] new : Any |
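A short sketch of the exact-length case handled by ``np.place`` above: ``new`` has one value per True entry in ``mask``, so nothing is repeated or truncated (illustrative inputs):
>>> values = np.array([1, 2, 3, 4])
>>> putmask_without_repeat(values, np.array([True, False, True, False]), np.array([10, 30]))
>>> values
array([10,  2, 30,  4])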
173,195 | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
)
import numpy as np
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
npt,
)
from pandas.compat import np_version_under1p21
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import is_list_like
from pandas.core.arrays import ExtensionArray
def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]:
"""
If we have a SparseArray or BooleanArray, convert it to ndarray[bool].
"""
if isinstance(mask, ExtensionArray):
# We could have BooleanArray, Sparse[bool], ...
# Except for BooleanArray, this is equivalent to just
# np.asarray(mask, dtype=bool)
mask = mask.to_numpy(dtype=bool, na_value=False)
mask = np.asarray(mask, dtype=bool)
return mask
ArrayLike = Union["ExtensionArray", np.ndarray]
The provided code snippet includes necessary dependencies for implementing the `validate_putmask` function. Write a Python function `def validate_putmask( values: ArrayLike | MultiIndex, mask: np.ndarray ) -> tuple[npt.NDArray[np.bool_], bool]` to solve the following problem:
Validate mask and check if this putmask operation is a no-op.
Here is the function:
def validate_putmask(
values: ArrayLike | MultiIndex, mask: np.ndarray
) -> tuple[npt.NDArray[np.bool_], bool]:
"""
Validate mask and check if this putmask operation is a no-op.
"""
mask = extract_bool_array(mask)
if mask.shape != values.shape:
raise ValueError("putmask: mask and data must be the same size")
noop = not mask.any()
return mask, noop | Validate mask and check if this putmask operation is a no-op. |
173,196 | from __future__ import annotations
import numpy as np
from pandas._typing import (
ArrayLike,
Scalar,
npt,
)
from pandas.compat.numpy import np_percentile_argname
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
)
def quantile_with_mask(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
fill_value,
qs: npt.NDArray[np.float64],
interpolation: str,
) -> np.ndarray:
"""
Compute the quantiles of the given values for each quantile in `qs`.
Parameters
----------
values : np.ndarray
For ExtensionArray, this is _values_for_factorize()[0]
mask : np.ndarray[bool]
mask = isna(values)
For ExtensionArray, this is computed before calling _value_for_factorize
fill_value : Scalar
        The value used to fill NA entries.
For ExtensionArray, this is _values_for_factorize()[1]
qs : np.ndarray[float64]
interpolation : str
Type of interpolation
Returns
-------
np.ndarray
Notes
-----
Assumes values is already 2D. For ExtensionArray this means np.atleast_2d
has been called on _values_for_factorize()[0]
Quantile is computed along axis=1.
"""
assert values.shape == mask.shape
if values.ndim == 1:
# unsqueeze, operate, re-squeeze
values = np.atleast_2d(values)
mask = np.atleast_2d(mask)
res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation)
return res_values[0]
assert values.ndim == 2
is_empty = values.shape[1] == 0
if is_empty:
# create the array of na_values
# 2d len(values) * len(qs)
flat = np.array([fill_value] * len(qs))
result = np.repeat(flat, len(values)).reshape(len(values), len(qs))
else:
result = _nanpercentile(
values,
qs * 100.0,
na_value=fill_value,
mask=mask,
interpolation=interpolation,
)
result = np.array(result, copy=False)
result = result.T
return result
ArrayLike = Union["ExtensionArray", np.ndarray]
def isna(obj: Scalar) -> bool:
...
def isna(
obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
...
def isna(obj: NDFrameT) -> NDFrameT:
...
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : bool, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
numpy.datetime64('NaT')
"""
if isinstance(dtype, ExtensionDtype):
return dtype.na_value
elif needs_i8_conversion(dtype):
return dtype.type("NaT", "ns")
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
if compat:
return False
return np.nan
return np.nan
The provided code snippet includes necessary dependencies for implementing the `quantile_compat` function. Write a Python function `def quantile_compat( values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str ) -> ArrayLike` to solve the following problem:
Compute the quantiles of the given values for each quantile in `qs`. Parameters ---------- values : np.ndarray or ExtensionArray qs : np.ndarray[float64] interpolation : str Returns ------- np.ndarray or ExtensionArray
Here is the function:
def quantile_compat(
values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str
) -> ArrayLike:
"""
Compute the quantiles of the given values for each quantile in `qs`.
Parameters
----------
values : np.ndarray or ExtensionArray
qs : np.ndarray[float64]
interpolation : str
Returns
-------
np.ndarray or ExtensionArray
"""
if isinstance(values, np.ndarray):
fill_value = na_value_for_dtype(values.dtype, compat=False)
mask = isna(values)
return quantile_with_mask(values, mask, fill_value, qs, interpolation)
else:
return values._quantile(qs, interpolation) | Compute the quantiles of the given values for each quantile in `qs`. Parameters ---------- values : np.ndarray or ExtensionArray qs : np.ndarray[float64] interpolation : str Returns ------- np.ndarray or ExtensionArray |
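An illustrative call on a plain float ndarray; the output below assumes the ``_nanpercentile`` helper (referenced but not shown in this snippet) falls back to an ordinary percentile computation when no values are missing:
>>> quantile_compat(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0.25, 0.5, 0.75]), 'linear')
array([1.75, 2.5 , 3.25])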
173,197 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._typing import npt
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float_dtype,
is_integer_dtype,
)
def _cum_func(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
):
"""
Accumulations for 1D masked array.
We will modify values in place to replace NAs with the appropriate fill value.
Parameters
----------
func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
operation).
mask : np.ndarray
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
"""
dtype_info: np.iinfo | np.finfo
if is_float_dtype(values):
dtype_info = np.finfo(values.dtype.type)
elif is_integer_dtype(values):
dtype_info = np.iinfo(values.dtype.type)
elif is_bool_dtype(values):
# Max value of bool is 1, but since we are setting into a boolean
# array, 255 is fine as well. Min value has to be 0 when setting
# into the boolean array.
dtype_info = np.iinfo(np.uint8)
else:
raise NotImplementedError(
f"No masked accumulation defined for dtype {values.dtype.type}"
)
try:
fill_value = {
np.cumprod: 1,
np.maximum.accumulate: dtype_info.min,
np.cumsum: 0,
np.minimum.accumulate: dtype_info.max,
}[func]
except KeyError:
raise NotImplementedError(
f"No accumulation for {func} implemented on BaseMaskedArray"
)
values[mask] = fill_value
if not skipna:
mask = np.maximum.accumulate(mask)
values = func(values)
return values, mask
def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna) | null |
173,198 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._typing import npt
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float_dtype,
is_integer_dtype,
)
def _cum_func(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
):
"""
Accumulations for 1D masked array.
We will modify values in place to replace NAs with the appropriate fill value.
Parameters
----------
func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
operation).
mask : np.ndarray
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
"""
dtype_info: np.iinfo | np.finfo
if is_float_dtype(values):
dtype_info = np.finfo(values.dtype.type)
elif is_integer_dtype(values):
dtype_info = np.iinfo(values.dtype.type)
elif is_bool_dtype(values):
# Max value of bool is 1, but since we are setting into a boolean
# array, 255 is fine as well. Min value has to be 0 when setting
# into the boolean array.
dtype_info = np.iinfo(np.uint8)
else:
raise NotImplementedError(
f"No masked accumulation defined for dtype {values.dtype.type}"
)
try:
fill_value = {
np.cumprod: 1,
np.maximum.accumulate: dtype_info.min,
np.cumsum: 0,
np.minimum.accumulate: dtype_info.max,
}[func]
except KeyError:
raise NotImplementedError(
f"No accumulation for {func} implemented on BaseMaskedArray"
)
values[mask] = fill_value
if not skipna:
mask = np.maximum.accumulate(mask)
values = func(values)
return values, mask
def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna) | null |
173,199 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas._typing import (
AxisInt,
npt,
)
from pandas.core.nanops import check_below_min_count
def _reductions(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
**kwargs,
):
"""
Sum, mean or product for 1D masked array.
Parameters
----------
func : np.sum or np.prod
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
axis : int, optional, default None
"""
if not skipna:
if mask.any(axis=axis) or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
return func(values, axis=axis, **kwargs)
else:
if check_below_min_count(values.shape, mask, min_count) and (
axis is None or values.ndim == 1
):
return libmissing.NA
return func(values, where=~mask, axis=axis, **kwargs)
AxisInt = int
def sum(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
):
return _reductions(
np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
) | null |
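A hedged usage sketch of the masked `sum` above (assumes the `_reductions`/`sum` definitions from this row are importable; note that this `sum` shadows the builtin in that namespace):
import numpy as np
vals = np.array([1.0, 2.0, 4.0])
mask = np.array([False, True, False])          # the 2.0 is missing
sum(vals, mask, skipna=True)                   # 5.0  -- np.sum(values, where=~mask)
sum(vals, mask, skipna=False)                  # pd.NA -- any masked value poisons the result
sum(vals, mask, skipna=True, min_count=3)      # pd.NA -- only 2 valid values, below min_count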
173,200 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas._typing import (
AxisInt,
npt,
)
from pandas.core.nanops import check_below_min_count
def _reductions(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
**kwargs,
):
"""
Sum, mean or product for 1D masked array.
Parameters
----------
func : np.sum or np.prod
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
axis : int, optional, default None
"""
if not skipna:
if mask.any(axis=axis) or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
return func(values, axis=axis, **kwargs)
else:
if check_below_min_count(values.shape, mask, min_count) and (
axis is None or values.ndim == 1
):
return libmissing.NA
return func(values, where=~mask, axis=axis, **kwargs)
AxisInt = int
def prod(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
):
return _reductions(
np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
) | null |
173,201 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas._typing import (
AxisInt,
npt,
)
from pandas.core.nanops import check_below_min_count
def _minmax(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
    ...  # body omitted in this snippet; the full _minmax implementation appears in the next row
AxisInt = int
def min(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis) | null |
173,202 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas._typing import (
AxisInt,
npt,
)
from pandas.core.nanops import check_below_min_count
def _minmax(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
"""
Reduction for 1D masked array.
Parameters
----------
func : np.min or np.max
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
axis : int, optional, default None
"""
if not skipna:
if mask.any() or not values.size:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
else:
return func(values)
else:
subset = values[~mask]
if subset.size:
return func(subset)
else:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
AxisInt = int
def max(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis) | null |
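A small illustration of the `_minmax`-based `max` above (assumes the definitions in this row are in scope; this `max` shadows the builtin, and the commented results follow the code path shown):
import numpy as np
vals = np.array([2, 7, 5], dtype=np.int64)
mask = np.array([False, True, False])
max(vals, mask, skipna=True)                     # 5 -- np.max over the unmasked subset
max(vals, mask, skipna=False)                    # pd.NA -- at least one value is masked
max(vals, np.ones(3, dtype=bool), skipna=True)   # pd.NA -- empty subset; bare numpy would raise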
173,203 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas._typing import (
AxisInt,
npt,
)
from pandas.core.nanops import check_below_min_count
def _reductions(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
**kwargs,
):
"""
Sum, mean or product for 1D masked array.
Parameters
----------
func : np.sum or np.prod
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
axis : int, optional, default None
"""
if not skipna:
if mask.any(axis=axis) or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
return func(values, axis=axis, **kwargs)
else:
if check_below_min_count(values.shape, mask, min_count) and (
axis is None or values.ndim == 1
):
return libmissing.NA
return func(values, where=~mask, axis=axis, **kwargs)
AxisInt = int
def mean(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
if not values.size or mask.all():
return libmissing.NA
return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis) | null |
173,204 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas._typing import (
AxisInt,
npt,
)
from pandas.core.nanops import check_below_min_count
def _reductions(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
**kwargs,
):
"""
Sum, mean or product for 1D masked array.
Parameters
----------
func : np.sum or np.prod
values : np.ndarray
        Numpy array with the values (can be of any dtype that supports the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
axis : int, optional, default None
"""
if not skipna:
if mask.any(axis=axis) or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
return func(values, axis=axis, **kwargs)
else:
if check_below_min_count(values.shape, mask, min_count) and (
axis is None or values.ndim == 1
):
return libmissing.NA
return func(values, where=~mask, axis=axis, **kwargs)
AxisInt = int
def var(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
ddof: int = 1,
):
if not values.size or mask.all():
return libmissing.NA
return _reductions(
np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
) | null |
173,205 | from __future__ import annotations
from typing import Callable
import numpy as np
from pandas._libs import missing as libmissing
from pandas._typing import (
AxisInt,
npt,
)
from pandas.core.nanops import check_below_min_count
def _reductions(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
**kwargs,
):
    ...  # body omitted in this snippet; the full _reductions implementation appears in earlier rows
AxisInt = int
def std(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
ddof: int = 1,
):
if not values.size or mask.all():
return libmissing.NA
return _reductions(
np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
) | null |
173,206 | from __future__ import annotations
import operator
def radd(left, right):
return right + left | null |
173,207 | from __future__ import annotations
import operator
def rsub(left, right):
return right - left | null |
173,208 | from __future__ import annotations
import operator
def rmul(left, right):
return right * left | null |
173,209 | from __future__ import annotations
import operator
def rdiv(left, right):
return right / left | null |
173,210 | from __future__ import annotations
import operator
def rtruediv(left, right):
return right / left | null |
173,211 | from __future__ import annotations
import operator
def rfloordiv(left, right):
return right // left | null |
173,212 | from __future__ import annotations
import operator
def rmod(left, right):
# check if right is a string as % is the string
# formatting operation; this is a TypeError
# otherwise perform the op
if isinstance(right, str):
typ = type(left).__name__
raise TypeError(f"{typ} cannot perform the operation mod")
return right % left | null |
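A quick check of the `rmod` semantics above (illustrative; assumes `rmod` from this row is in scope):
rmod(3, 10)                       # 10 % 3 == 1 -- the operands are swapped
try:
    rmod([1, 2], "%d items")      # a str on the right would mean string formatting, not modulo
except TypeError as err:
    print(err)                    # "list cannot perform the operation mod"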
173,213 | from __future__ import annotations
import operator
def rdivmod(left, right):
return divmod(right, left) | null |
173,214 | from __future__ import annotations
import operator
def rpow(left, right):
return right**left | null |
173,215 | from __future__ import annotations
import operator
def rand_(left, right):
return operator.and_(right, left) | null |
173,216 | from __future__ import annotations
import operator
def ror_(left, right):
return operator.or_(right, left) | null |
173,217 | from __future__ import annotations
import operator
def rxor(left, right):
return operator.xor(right, left) | null |
173,218 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
def consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
try:
if obj.name != name:
name = None
except ValueError:
name = None
return name | null |
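A brief illustration of `consensus_name_attr` (assumes the function above plus pandas are importable):
import pandas as pd
a = pd.Series([1, 2], name="x")
b = pd.Series([3, 4], name="x")
c = pd.Series([5, 6], name="y")
consensus_name_attr([a, b])      # "x"  -- every object carries the same name
consensus_name_attr([a, b, c])   # None -- names disagree, so no consensus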
173,219 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
Any = object()
def is_bool_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> from pandas.api.types import is_bool_dtype
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool_)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.arrays.SparseArray([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
dtype = get_dtype(arr_or_dtype)
except (TypeError, ValueError):
return False
if isinstance(dtype, CategoricalDtype):
arr_or_dtype = dtype.categories
# now we use the special definition for Index
if isinstance(arr_or_dtype, ABCIndex):
# Allow Index[object] that is all-bools or Index["boolean"]
return arr_or_dtype.inferred_type == "boolean"
elif isinstance(dtype, ExtensionDtype):
return getattr(dtype, "_is_boolean", False)
return issubclass(dtype.type, np.bool_)
def is_extension_array_dtype(arr_or_dtype) -> bool:
"""
Check if an object is a pandas extension array type.
    See the :ref:`User Guide <extending.extension-types>` for more.
Parameters
----------
arr_or_dtype : object
For array-like input, the ``.dtype`` attribute will
be extracted.
Returns
-------
bool
Whether the `arr_or_dtype` is an extension array type.
Notes
-----
This checks whether an object implements the pandas extension
array interface. In pandas, this includes:
* Categorical
* Sparse
* Interval
* Period
* DatetimeArray
* TimedeltaArray
Third-party libraries may implement arrays or types satisfying
this interface as well.
Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
>>> arr = pd.Categorical(['a', 'b'])
>>> is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
True
>>> arr = np.array(['a', 'b'])
>>> is_extension_array_dtype(arr.dtype)
False
"""
dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
if isinstance(dtype, ExtensionDtype):
return True
elif isinstance(dtype, np.dtype):
return False
else:
return registry.find(dtype) is not None
ABCIndex = cast(
"Type[Index]",
create_pandas_abc_type(
"ABCIndex",
"_typ",
{
"index",
"rangeindex",
"multiindex",
"datetimeindex",
"timedeltaindex",
"periodindex",
"categoricalindex",
"intervalindex",
},
),
)
ABCSeries = cast(
"Type[Series]",
create_pandas_abc_type("ABCSeries", "_typ", ("series",)),
)
def isna(obj: Scalar) -> bool:
...
def isna(
obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
...
def isna(obj: NDFrameT) -> NDFrameT:
...
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
The provided code snippet includes necessary dependencies for implementing the `is_bool_indexer` function. Write a Python function `def is_bool_indexer(key: Any) -> bool` to solve the following problem:
Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Whether `key` is a valid boolean indexer. Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values. See Also -------- check_array_indexer : Check that `key` is a valid array to index, and convert to an ndarray.
Here is the function:
def is_bool_indexer(key: Any) -> bool:
"""
Check whether `key` is a valid boolean indexer.
Parameters
----------
key : Any
Only list-likes may be considered boolean indexers.
All other types are not considered a boolean indexer.
For array-like input, boolean ndarrays or ExtensionArrays
with ``_is_boolean`` set are considered boolean indexers.
Returns
-------
bool
Whether `key` is a valid boolean indexer.
Raises
------
ValueError
When the array is an object-dtype ndarray or ExtensionArray
and contains missing values.
See Also
--------
check_array_indexer : Check that `key` is a valid array to index,
and convert to an ndarray.
"""
if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (
is_array_like(key) and is_extension_array_dtype(key.dtype)
):
if key.dtype == np.object_:
key_array = np.asarray(key)
if not lib.is_bool_array(key_array):
na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
if lib.infer_dtype(key_array) == "boolean" and isna(key_array).any():
# Don't raise on e.g. ["A", "B", np.nan], see
# test_loc_getitem_list_of_labels_categoricalindex_with_na
raise ValueError(na_msg)
return False
return True
elif is_bool_dtype(key.dtype):
return True
elif isinstance(key, list):
# check if np.array(key).dtype would be bool
if len(key) > 0:
if type(key) is not list:
# GH#42461 cython will raise TypeError if we pass a subclass
key = list(key)
return lib.is_bool_list(key)
return False | Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Whether `key` is a valid boolean indexer. Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values. See Also -------- check_array_indexer : Check that `key` is a valid array to index, and convert to an ndarray. |
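An illustrative set of calls for `is_bool_indexer` (assumes the function above together with numpy and pandas; the expected results in the comments follow the logic shown, not verified library output):
import numpy as np
import pandas as pd
is_bool_indexer(np.array([True, False, True]))              # True  -- bool-dtype ndarray
is_bool_indexer([True, False, True])                        # True  -- plain list of bools
is_bool_indexer(np.array([1, 0, 1]))                        # False -- integers, not booleans
is_bool_indexer(pd.array([True, pd.NA], dtype="boolean"))   # True  -- ExtensionArray with _is_boolean
try:
    is_bool_indexer(np.array([True, np.nan], dtype=object)) # boolean-with-NA object array
except ValueError as err:
    print(err)  # "Cannot mask with non-boolean array containing NA / NaN values"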
173,220 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
The provided code snippet includes necessary dependencies for implementing the `cast_scalar_indexer` function. Write a Python function `def cast_scalar_indexer(val)` to solve the following problem:
Disallow indexing with a float key, even if that key is a round number. Parameters ---------- val : scalar Returns ------- outval : scalar
Here is the function:
def cast_scalar_indexer(val):
"""
Disallow indexing with a float key, even if that key is a round number.
Parameters
----------
val : scalar
Returns
-------
outval : scalar
"""
# assumes lib.is_scalar(val)
if lib.is_float(val) and val.is_integer():
raise IndexError(
# GH#34193
"Indexing with a float is no longer supported. Manually convert "
"to an integer key instead."
)
return val | Disallow indexing with a float key, even if that key is a round number. Parameters ---------- val : scalar Returns ------- outval : scalar |
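A short demonstration of `cast_scalar_indexer` (assumes the function above is in scope):
cast_scalar_indexer(3)        # 3 -- integers pass through untouched
cast_scalar_indexer("a")      # "a" -- non-float scalars are returned as-is
try:
    cast_scalar_indexer(3.0)  # even a round float is rejected (GH#34193)
except IndexError as err:
    print(err)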
173,221 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
The provided code snippet includes necessary dependencies for implementing the `all_none` function. Write a Python function `def all_none(*args) -> bool` to solve the following problem:
Returns a boolean indicating if all arguments are None.
Here is the function:
def all_none(*args) -> bool:
"""
Returns a boolean indicating if all arguments are None.
"""
return all(arg is None for arg in args) | Returns a boolean indicating if all arguments are None. |
173,222 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
def asarray_tuplesafe(
values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
) -> np.ndarray:
# ExtensionArray can only be returned when values is an Index, all other iterables
# will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
# signature, so instead we special-case some common types.
...
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
...
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
values = list(values)
elif isinstance(values, ABCIndex):
return values._values
if isinstance(values, list) and dtype in [np.object_, object]:
return construct_1d_object_array_from_listlike(values)
try:
with warnings.catch_warnings():
# Can remove warning filter once NumPy 1.24 is min version
warnings.simplefilter("ignore", np.VisibleDeprecationWarning)
result = np.asarray(values, dtype=dtype)
except ValueError:
# Using try/except since it's more performant than checking is_list_like
# over each element
# error: Argument 1 to "construct_1d_object_array_from_listlike"
# has incompatible type "Iterable[Any]"; expected "Sized"
return construct_1d_object_array_from_listlike(values) # type: ignore[arg-type]
if issubclass(result.dtype.type, str):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
# Avoid building an array of arrays:
values = [tuple(x) for x in values]
result = construct_1d_object_array_from_listlike(values)
return result
class Iterable(Protocol[_T_co]):
def __iter__(self) -> Iterator[_T_co]: ...
NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]]
The provided code snippet includes necessary dependencies for implementing the `index_labels_to_array` function. Write a Python function `def index_labels_to_array( labels: np.ndarray | Iterable, dtype: NpDtype | None = None ) -> np.ndarray` to solve the following problem:
Transform label or iterable of labels to array, for use in Index. Parameters ---------- dtype : dtype If specified, use as dtype of the resulting array, otherwise infer. Returns ------- array
Here is the function:
def index_labels_to_array(
labels: np.ndarray | Iterable, dtype: NpDtype | None = None
) -> np.ndarray:
"""
Transform label or iterable of labels to array, for use in Index.
Parameters
----------
dtype : dtype
If specified, use as dtype of the resulting array, otherwise infer.
Returns
-------
array
"""
if isinstance(labels, (str, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = asarray_tuplesafe(labels, dtype=dtype)
return labels | Transform label or iterable of labels to array, for use in Index. Parameters ---------- dtype : dtype If specified, use as dtype of the resulting array, otherwise infer. Returns ------- array |
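A hedged sketch of `index_labels_to_array` behavior (assumes the function and its `asarray_tuplesafe` helper from this row are importable; the commented results follow the code path, not verified output):
import numpy as np
index_labels_to_array("a")                        # array(['a'], dtype=object) -- a lone string becomes one label
index_labels_to_array(("a", "b"))                 # array([('a', 'b')], dtype=object) -- a tuple is a single label
index_labels_to_array(["a", "b"])                 # array(['a', 'b'], dtype=object) -- list elements stay separate
index_labels_to_array(range(3), dtype=np.int64)   # array([0, 1, 2]) -- other iterables are converted to a list first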
173,223 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
def maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj | null |
173,224 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
The provided code snippet includes necessary dependencies for implementing the `is_empty_slice` function. Write a Python function `def is_empty_slice(obj) -> bool` to solve the following problem:
We have an empty slice, e.g. no values are selected.
Here is the function:
def is_empty_slice(obj) -> bool:
"""
We have an empty slice, e.g. no values are selected.
"""
return (
isinstance(obj, slice)
and obj.start is not None
and obj.stop is not None
and obj.start == obj.stop
) | We have an empty slice, e.g. no values are selected. |
173,225 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
def is_null_slice(obj) -> bool:
"""
We have a null slice.
"""
return (
isinstance(obj, slice)
and obj.start is None
and obj.stop is None
and obj.step is None
)
The provided code snippet includes necessary dependencies for implementing the `is_true_slices` function. Write a Python function `def is_true_slices(line) -> list[bool]` to solve the following problem:
Find non-trivial slices in "line": return a list of booleans with same length.
Here is the function:
def is_true_slices(line) -> list[bool]:
"""
Find non-trivial slices in "line": return a list of booleans with same length.
"""
return [isinstance(k, slice) and not is_null_slice(k) for k in line] | Find non-trivial slices in "line": return a list of booleans with same length. |
173,226 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
The provided code snippet includes necessary dependencies for implementing the `is_full_slice` function. Write a Python function `def is_full_slice(obj, line: int) -> bool` to solve the following problem:
We have a full length slice.
Here is the function:
def is_full_slice(obj, line: int) -> bool:
"""
We have a full length slice.
"""
return (
isinstance(obj, slice)
and obj.start == 0
and obj.stop == line
and obj.step is None
) | We have a full length slice. |
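A few quick checks covering the slice helpers defined in this and the two preceding rows (illustrative; assumes `is_empty_slice`, `is_true_slices`, `is_null_slice` and `is_full_slice` live in one namespace):
is_full_slice(slice(0, 5), 5)                     # True  -- starts at 0, stops at the line length, default step
is_full_slice(slice(0, 4), 5)                     # False -- stops short of the line length
is_empty_slice(slice(2, 2))                       # True  -- start == stop selects nothing
is_true_slices([slice(None), slice(1, 3), "a"])   # [False, True, False] -- only non-null slices count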
173,227 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
The provided code snippet includes necessary dependencies for implementing the `apply_if_callable` function. Write a Python function `def apply_if_callable(maybe_callable, obj, **kwargs)` to solve the following problem:
Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs
Here is the function:
def apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is.
Parameters
----------
maybe_callable : possibly a callable
obj : NDFrame
**kwargs
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable | Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs |
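A minimal example of `apply_if_callable` (assumes the function above plus pandas):
import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3]})
apply_if_callable(lambda d: d["a"] * 2, df)   # the lambda is evaluated against df -> Series([2, 4, 6])
apply_if_callable([True, False, True], df)    # not callable -> returned unchanged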
173,228 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
class Callable(BaseTypingInstance):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
def py__get__(self, instance, class_value):
return ValueSet([self])
T = TypeVar("T")
The provided code snippet includes necessary dependencies for implementing the `pipe` function. Write a Python function `def pipe( obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs ) -> T` to solve the following problem:
Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, str) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the object. *args : iterable, optional Positional arguments passed into ``func``. **kwargs : dict, optional A dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``.
Here is the function:
def pipe(
obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
) -> T:
"""
Apply a function ``func`` to object ``obj`` either by passing obj as the
first argument to the function or, in the case that the func is a tuple,
interpret the first element of the tuple as a function and pass the obj to
that function as a keyword argument whose key is the value of the second
element of the tuple.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this object or, alternatively, a
``(callable, data_keyword)`` tuple where ``data_keyword`` is a
string indicating the keyword of ``callable`` that expects the
object.
*args : iterable, optional
Positional arguments passed into ``func``.
**kwargs : dict, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = f"{target} is both the pipe target and a keyword argument"
raise ValueError(msg)
kwargs[target] = obj
return func(*args, **kwargs)
else:
return func(obj, *args, **kwargs) | Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, str) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the object. *args : iterable, optional Positional arguments passed into ``func``. **kwargs : dict, optional A dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. |
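A small, self-contained sketch of the two calling conventions `pipe` supports (the `subtract` helper is made up for this demo):
def subtract(minuend, subtrahend):
    return minuend - subtrahend
pipe(10, subtract, 3)                    # subtract(10, 3) -> 7; obj is passed positionally first
pipe(10, (subtract, "subtrahend"), 12)   # subtract(12, subtrahend=10) -> 2; obj goes to the named keyword
try:
    pipe(10, (subtract, "subtrahend"), 12, subtrahend=1)
except ValueError as err:
    print(err)                           # "subtrahend is both the pipe target and a keyword argument"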
173,229 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
ABCSeries = cast(
"Type[Series]",
create_pandas_abc_type("ABCSeries", "_typ", ("series",)),
)
The provided code snippet includes necessary dependencies for implementing the `get_rename_function` function. Write a Python function `def get_rename_function(mapper)` to solve the following problem:
Returns a function that will map names/labels, depending on whether mapper is a dict, Series or just a function.
Here is the function:
def get_rename_function(mapper):
"""
    Returns a function that will map names/labels, depending on whether mapper
is a dict, Series or just a function.
"""
def f(x):
if x in mapper:
return mapper[x]
else:
return x
    return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper | Returns a function that will map names/labels, depending on whether mapper is a dict, Series or just a function. |
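An illustrative use of `get_rename_function` (assumes the function above is in scope):
mapper = {"a": "alpha", "b": "beta"}
rename = get_rename_function(mapper)
[rename(x) for x in ["a", "b", "c"]]      # ['alpha', 'beta', 'c'] -- labels missing from the dict pass through
get_rename_function(str.upper)("a")       # 'A' -- a plain callable is returned unchanged and used directly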
173,230 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
class Generator(Iterator[_T_co], Generic[_T_co, _T_contra, _V_co]):
def __next__(self) -> _T_co: ...
def send(self, __value: _T_contra) -> _T_co: ...
def throw(
self, __typ: Type[BaseException], __val: Union[BaseException, object] = ..., __tb: Optional[TracebackType] = ...
) -> _T_co: ...
def throw(self, __typ: BaseException, __val: None = ..., __tb: Optional[TracebackType] = ...) -> _T_co: ...
def close(self) -> None: ...
def __iter__(self) -> Generator[_T_co, _T_contra, _V_co]: ...
def gi_code(self) -> CodeType: ...
def gi_frame(self) -> FrameType: ...
def gi_running(self) -> bool: ...
def gi_yieldfrom(self) -> Optional[Generator[Any, Any, Any]]: ...
The provided code snippet includes necessary dependencies for implementing the `temp_setattr` function. Write a Python function `def temp_setattr(obj, attr: str, value) -> Generator[None, None, None]` to solve the following problem:
Temporarily set attribute on an object. Args: obj: Object whose attribute will be modified. attr: Attribute to modify. value: Value to temporarily set attribute to. Yields: obj with modified attribute.
Here is the function:
def temp_setattr(obj, attr: str, value) -> Generator[None, None, None]:
"""Temporarily set attribute on an object.
Args:
obj: Object whose attribute will be modified.
attr: Attribute to modify.
value: Value to temporarily set attribute to.
Yields:
obj with modified attribute.
"""
old_value = getattr(obj, attr)
setattr(obj, attr, value)
try:
yield obj
finally:
setattr(obj, attr, old_value) | Temporarily set attribute on an object. Args: obj: Object whose attribute will be modified. attr: Attribute to modify. value: Value to temporarily set attribute to. Yields: obj with modified attribute. |
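A usage sketch for `temp_setattr`. In pandas this generator is wrapped with `@contextlib.contextmanager`; that decorator is not shown in the snippet above, so the demo applies it explicitly (the `Config` class is invented for illustration):
import contextlib
class Config:
    verbose = False
cfg = Config()
with contextlib.contextmanager(temp_setattr)(cfg, "verbose", True):
    print(cfg.verbose)   # True inside the block
print(cfg.verbose)       # False again -- the old value is restored in the finally clause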
173,231 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
_cython_table = {
builtins.sum: "sum",
builtins.max: "max",
builtins.min: "min",
np.all: "all",
np.any: "any",
np.sum: "sum",
np.nansum: "sum",
np.mean: "mean",
np.nanmean: "mean",
np.prod: "prod",
np.nanprod: "prod",
np.std: "std",
np.nanstd: "std",
np.var: "var",
np.nanvar: "var",
np.median: "median",
np.nanmedian: "median",
np.max: "max",
np.nanmax: "max",
np.min: "min",
np.nanmin: "min",
np.cumprod: "cumprod",
np.nancumprod: "cumprod",
np.cumsum: "cumsum",
np.nancumsum: "cumsum",
}
class Callable(BaseTypingInstance):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
def py__get__(self, instance, class_value):
return ValueSet([self])
The provided code snippet includes necessary dependencies for implementing the `get_cython_func` function. Write a Python function `def get_cython_func(arg: Callable) -> str | None` to solve the following problem:
if we define an internal function for this argument, return it
Here is the function:
def get_cython_func(arg: Callable) -> str | None:
"""
if we define an internal function for this argument, return it
"""
return _cython_table.get(arg) | if we define an internal function for this argument, return it |
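A quick illustration of `get_cython_func` and its `_cython_table` lookup (assumes the definitions above are importable):
import numpy as np
get_cython_func(np.sum)       # 'sum'  -- mapped to pandas' internal aggregation name
get_cython_func(np.nanmean)   # 'mean'
get_cython_func(len)          # None   -- nothing registered for builtins.len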
173,232 | from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.maximum.reduce,
builtins.min: np.minimum.reduce,
}
The provided code snippet includes necessary dependencies for implementing the `is_builtin_func` function. Write a Python function `def is_builtin_func(arg)` to solve the following problem:
if we define a builtin function for this argument, return it, otherwise return the arg
Here is the function:
def is_builtin_func(arg):
"""
if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return _builtin_table.get(arg, arg) | if we define a builtin function for this argument, return it, otherwise return the arg |
173,233 | from __future__ import annotations
import copy
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
Literal,
cast,
final,
no_type_check,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
Period,
Timedelta,
Timestamp,
to_offset,
)
from pandas._typing import (
AnyArrayLike,
Axis,
AxisInt,
Frequency,
IndexLabel,
NDFrameT,
QuantileInterpolation,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvertibleTypes,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.algorithms as algos
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.groupby.groupby import (
BaseGroupBy,
GroupBy,
_pipe_template,
get_groupby,
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
)
from pandas.core.indexes.period import (
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import (
TimedeltaIndex,
timedelta_range,
)
from pandas.tseries.frequencies import (
is_subperiod,
is_superperiod,
)
from pandas.tseries.offsets import (
Day,
Tick,
)
class Resampler(BaseGroupBy, PandasObject):
"""
Class for resampling datetimelike data, a groupby-like operation.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.resample(...) to use Resampler.
Parameters
----------
obj : Series or DataFrame
groupby : TimeGrouper
axis : int, default 0
kind : str or None
'period', 'timestamp' to override default index treatment
Returns
-------
a Resampler of the appropriate type
Notes
-----
After resampling, see aggregate, apply, and transform functions.
"""
grouper: BinGrouper
_timegrouper: TimeGrouper
binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
_internal_names_set = set({"obj", "ax", "_indexer"})
# to the groupby descriptor
_attributes = [
"freq",
"axis",
"closed",
"label",
"convention",
"kind",
"origin",
"offset",
]
def __init__(
self,
obj: NDFrame,
timegrouper: TimeGrouper,
axis: Axis = 0,
kind=None,
*,
gpr_index: Index,
group_keys: bool = False,
selection=None,
) -> None:
self._timegrouper = timegrouper
self.keys = None
self.sort = True
self.axis = obj._get_axis_number(axis)
self.kind = kind
self.group_keys = group_keys
self.as_index = True
self.obj, self.ax, self._indexer = self._timegrouper._set_grouper(
self._convert_obj(obj), sort=True, gpr_index=gpr_index
)
self.binner, self.grouper = self._get_binner()
self._selection = selection
if self._timegrouper.key is not None:
self.exclusions = frozenset([self._timegrouper.key])
else:
self.exclusions = frozenset()
def __str__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
f"{k}={getattr(self._timegrouper, k)}"
for k in self._attributes
if getattr(self._timegrouper, k, None) is not None
)
return f"{type(self).__name__} [{', '.join(attrs)}]"
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
return getattr(self._timegrouper, attr)
if attr in self.obj:
return self[attr]
return object.__getattribute__(self, attr)
def _from_selection(self) -> bool:
"""
Is the resampling from a DataFrame column or MultiIndex level.
"""
# upsampling and PeriodIndex resampling do not work
# with selection, this state used to catch and raise an error
return self._timegrouper is not None and (
self._timegrouper.key is not None or self._timegrouper.level is not None
)
def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
"""
Provide any conversions for the object in order to correctly handle.
Parameters
----------
obj : Series or DataFrame
Returns
-------
Series or DataFrame
"""
return obj._consolidate()
def _get_binner_for_time(self):
raise AbstractMethodError(self)
def _get_binner(self):
"""
Create the BinGrouper, assume that self.set_grouper(obj)
has already been called.
"""
binner, bins, binlabels = self._get_binner_for_time()
assert len(bins) == len(binlabels)
bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer)
return binner, bin_grouper
    @doc(
    _pipe_template,
    klass="Resampler",
examples="""
>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
... index=pd.date_range('2012-08-02', periods=4))
>>> df
A
2012-08-02 1
2012-08-03 2
2012-08-04 3
2012-08-05 4
To get the difference between each 2-day period's maximum and minimum
value in one pass, you can do
>>> df.resample('2D').pipe(lambda x: x.max() - x.min())
A
2012-08-02 1
2012-08-04 1""",
)
def pipe(
self,
func: Callable[..., T] | tuple[Callable[..., T], str],
*args,
**kwargs,
) -> T:
return super().pipe(func, *args, **kwargs)
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.groupby.aggregate : Aggregate using callable, string, dict,
or list of string/callables.
DataFrame.resample.transform : Transforms the Series on each group
based on the given function.
DataFrame.aggregate: Aggregate using one or more
operations over the specified axis.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5],
... index=pd.date_range('20130101', periods=5, freq='s'))
>>> s
2013-01-01 00:00:00 1
2013-01-01 00:00:01 2
2013-01-01 00:00:02 3
2013-01-01 00:00:03 4
2013-01-01 00:00:04 5
Freq: S, dtype: int64
>>> r = s.resample('2s')
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
2013-01-01 00:00:02 7
2013-01-01 00:00:04 5
Freq: 2S, dtype: int64
>>> r.agg(['sum', 'mean', 'max'])
sum mean max
2013-01-01 00:00:00 3 1.5 2
2013-01-01 00:00:02 7 3.5 4
2013-01-01 00:00:04 5 5.0 5
>>> r.agg({'result': lambda x: x.mean() / x.std(),
... 'total': np.sum})
result total
2013-01-01 00:00:00 2.121320 3
2013-01-01 00:00:02 4.949747 7
2013-01-01 00:00:04 NaN 5
>>> r.agg(average="mean", total="sum")
average total
2013-01-01 00:00:00 1.5 3
2013-01-01 00:00:02 3.5 7
2013-01-01 00:00:04 5.0 5
"""
)
    @doc(
    _shared_docs["aggregate"],
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
klass="DataFrame",
axis="",
)
def aggregate(self, func=None, *args, **kwargs):
result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
if result is None:
how = func
result = self._groupby_and_aggregate(how, *args, **kwargs)
return result
agg = aggregate
apply = aggregate
def transform(self, arg, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group.
Return a Series with the transformed values.
Parameters
----------
arg : function
To apply to each group. Should return a Series with the same index.
Returns
-------
Series
Examples
--------
>>> s = pd.Series([1, 2],
... index=pd.date_range('20180101',
... periods=2,
... freq='1h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: H, dtype: int64
>>> resampled = s.resample('15min')
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
2018-01-01 00:00:00 NaN
2018-01-01 01:00:00 NaN
Freq: H, dtype: float64
"""
return self._selected_obj.groupby(self._timegrouper).transform(
arg, *args, **kwargs
)
def _downsample(self, f, **kwargs):
raise AbstractMethodError(self)
def _upsample(self, f, limit=None, fill_value=None):
raise AbstractMethodError(self)
def _gotitem(self, key, ndim: int, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
grouper = self.grouper
if subset is None:
subset = self.obj
if key is not None:
subset = subset[key]
else:
# reached via Apply.agg_dict_like with selection=None and ndim=1
assert subset.ndim == 1
if ndim == 1:
assert subset.ndim == 1
grouped = get_groupby(
subset, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
)
return grouped
def _groupby_and_aggregate(self, how, *args, **kwargs):
"""
Re-evaluate the obj with a groupby aggregation.
"""
grouper = self.grouper
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
# Excludes `on` column when provided
obj = self._obj_with_exclusions
grouped = get_groupby(
obj, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
)
try:
if callable(how):
# TODO: test_resample_apply_with_additional_args fails if we go
# through the non-lambda path, not clear that it should.
func = lambda x: how(x, *args, **kwargs)
result = grouped.aggregate(func)
else:
result = grouped.aggregate(how, *args, **kwargs)
except (AttributeError, KeyError):
# we have a non-reducing function; try to evaluate
# alternatively we want to evaluate only a column of the input
# test_apply_to_one_column_of_df the function being applied references
# a DataFrame column, but aggregate_item_by_item operates column-wise
# on Series, raising AttributeError or KeyError
# (depending on whether the column lookup uses getattr/__getitem__)
result = grouped.apply(how, *args, **kwargs)
except ValueError as err:
if "Must produce aggregated value" in str(err):
# raised in _aggregate_named
# see test_apply_without_aggregation, test_apply_with_mutated_index
pass
else:
raise
# we have a non-reducing function
# try to evaluate
result = grouped.apply(how, *args, **kwargs)
return self._wrap_result(result)
def _get_resampler_for_grouping(self, groupby: GroupBy, key):
"""
Return the correct class for resampling with groupby.
"""
return self._resampler_for_grouping(groupby=groupby, key=key, parent=self)
def _wrap_result(self, result):
"""
Potentially wrap any results.
"""
# GH 47705
obj = self.obj
if (
isinstance(result, ABCDataFrame)
and result.empty
and not isinstance(result.index, PeriodIndex)
):
result = result.set_index(
_asfreq_compat(obj.index[:0], freq=self.freq), append=True
)
if isinstance(result, ABCSeries) and self._selection is not None:
result.name = self._selection
if isinstance(result, ABCSeries) and result.empty:
# When index is all NaT, result is empty but index is not
result.index = _asfreq_compat(obj.index[:0], freq=self.freq)
result.name = getattr(obj, "name", None)
return result
def ffill(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
An upsampled Series.
See Also
--------
Series.fillna: Fill NA/NaN values using the specified method.
DataFrame.fillna: Fill NA/NaN values using the specified method.
"""
return self._upsample("ffill", limit=limit)
def nearest(self, limit=None):
"""
Resample by using the nearest value.
When resampling data, missing values may appear (e.g., when the
resampling frequency is higher than the original frequency).
The `nearest` method will replace ``NaN`` values that appeared in
the resampled data with the value from the nearest member of the
sequence, based on the index value.
Missing values that existed in the original data will not be modified.
If `limit` is given, fill only this many values in each direction for
each of the original values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with ``NaN`` values filled with
their nearest value.
See Also
--------
backfill : Backward fill the new missing values in the resampled data.
pad : Forward fill ``NaN`` values.
Examples
--------
>>> s = pd.Series([1, 2],
... index=pd.date_range('20180101',
... periods=2,
... freq='1h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: H, dtype: int64
>>> s.resample('15min').nearest()
2018-01-01 00:00:00 1
2018-01-01 00:15:00 1
2018-01-01 00:30:00 2
2018-01-01 00:45:00 2
2018-01-01 01:00:00 2
Freq: 15T, dtype: int64
Limit the number of upsampled values imputed by the nearest:
>>> s.resample('15min').nearest(limit=1)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
Freq: 15T, dtype: float64
"""
return self._upsample("nearest", limit=limit)
def bfill(self, limit=None):
"""
Backward fill the new missing values in the resampled data.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency). The backward fill will replace NaN values that appeared in
the resampled data with the next value in the original sequence.
Missing values that existed in the original data will not be modified.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series, DataFrame
An upsampled Series or DataFrame with backward filled NaN values.
See Also
--------
bfill : Alias of backfill.
fillna : Fill NaN values using the specified method, which can be
'backfill'.
nearest : Fill NaN values with nearest neighbor starting from center.
ffill : Forward fill NaN values.
Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'backfill'.
DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'backfill'.
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
>>> s.resample('30min').bfill()
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').bfill(limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
Resampling a DataFrame that has missing values:
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').bfill()
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
>>> df.resample('15min').bfill(limit=2)
a b
2018-01-01 00:00:00 2.0 1.0
2018-01-01 00:15:00 NaN NaN
2018-01-01 00:30:00 NaN 3.0
2018-01-01 00:45:00 NaN 3.0
2018-01-01 01:00:00 NaN 3.0
2018-01-01 01:15:00 NaN NaN
2018-01-01 01:30:00 6.0 5.0
2018-01-01 01:45:00 6.0 5.0
2018-01-01 02:00:00 6.0 5.0
"""
return self._upsample("bfill", limit=limit)
def fillna(self, method, limit=None):
"""
Fill missing values introduced by upsampling.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency).
Missing values that existed in the original data will
not be modified.
Parameters
----------
method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
Method to use for filling holes in resampled data
* 'pad' or 'ffill': use previous valid observation to fill gap
(forward fill).
* 'backfill' or 'bfill': use next valid observation to fill gap.
* 'nearest': use nearest valid observation to fill gap.
limit : int, optional
Limit of how many consecutive missing values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with missing values filled.
See Also
--------
bfill : Backward fill NaN values in the resampled data.
ffill : Forward fill NaN values in the resampled data.
nearest : Fill NaN values in the resampled data
with nearest neighbor starting from center.
interpolate : Fill NaN values using interpolation.
Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'bfill' and 'ffill'.
DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'bfill' and 'ffill'.
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
Without filling the missing values you get:
>>> s.resample("30min").asfreq()
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 2.0
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> s.resample('30min').fillna("backfill")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').fillna("backfill", limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
>>> s.resample('30min').fillna("pad")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 1
2018-01-01 01:00:00 2
2018-01-01 01:30:00 2
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('30min').fillna("nearest")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
Missing values present before the upsampling are not affected.
>>> sm = pd.Series([1, None, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> sm
2018-01-01 00:00:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 02:00:00 3.0
Freq: H, dtype: float64
>>> sm.resample('30min').fillna('backfill')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('pad')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('nearest')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
DataFrame resampling is done column-wise. All the same options are
available.
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').fillna("bfill")
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
"""
return self._upsample(method, limit=limit)
def interpolate(
self,
method: QuantileInterpolation = "linear",
*,
axis: Axis = 0,
limit=None,
inplace: bool = False,
limit_direction: Literal["forward", "backward", "both"] = "forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
result = self._upsample("asfreq")
return result.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
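# Illustrative sketch (not in the original source; it mirrors the docstring
# conventions used elsewhere in this class): interpolate first upsamples with
# "asfreq" and then fills the introduced gaps, linearly by default:
# >>> s = pd.Series([1, 3],
# ...               index=pd.date_range('20180101', periods=2, freq='h'))
# >>> s.resample('30min').interpolate()
# 2018-01-01 00:00:00    1.0
# 2018-01-01 00:30:00    2.0
# 2018-01-01 01:00:00    3.0
# Freq: 30T, dtype: float64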
def asfreq(self, fill_value=None):
"""
Return the values at the new freq, essentially a reindex.
Parameters
----------
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
DataFrame or Series
Values at the specified freq.
See Also
--------
Series.asfreq: Convert TimeSeries to specified frequency.
DataFrame.asfreq: Convert TimeSeries to specified frequency.
"""
return self._upsample("asfreq", fill_value=fill_value)
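# Illustrative sketch (not in the original source): asfreq only reindexes to the
# new frequency; ``fill_value`` fills slots introduced by upsampling and leaves
# pre-existing NaNs untouched:
# >>> s = pd.Series([1, 2],
# ...               index=pd.date_range('20180101', periods=2, freq='h'))
# >>> s.resample('30min').asfreq(fill_value=0)
# 2018-01-01 00:00:00    1
# 2018-01-01 00:30:00    0
# 2018-01-01 01:00:00    2
# Freq: 30T, dtype: int64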
def sum(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs)
nv.validate_resampler_func("sum", args, kwargs)
return self._downsample("sum", numeric_only=numeric_only, min_count=min_count)
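# Illustrative sketch (not in the original source): ``min_count`` sets how many
# valid values a bin needs before producing a non-NA sum:
# >>> s = pd.Series([1, 2, 3, 4],
# ...               index=pd.date_range('20130101', periods=4, freq='s'))
# >>> s.resample('3s').sum(min_count=3)
# 2013-01-01 00:00:00    6.0
# 2013-01-01 00:00:03    NaN
# Freq: 3S, dtype: float64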
def prod(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "prod", args, kwargs)
nv.validate_resampler_func("prod", args, kwargs)
return self._downsample("prod", numeric_only=numeric_only, min_count=min_count)
def min(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "min", args, kwargs)
nv.validate_resampler_func("min", args, kwargs)
return self._downsample("min", numeric_only=numeric_only, min_count=min_count)
def max(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "max", args, kwargs)
nv.validate_resampler_func("max", args, kwargs)
return self._downsample("max", numeric_only=numeric_only, min_count=min_count)
def first(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "first", args, kwargs)
nv.validate_resampler_func("first", args, kwargs)
return self._downsample("first", numeric_only=numeric_only, min_count=min_count)
def last(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "last", args, kwargs)
nv.validate_resampler_func("last", args, kwargs)
return self._downsample("last", numeric_only=numeric_only, min_count=min_count)
def median(self, numeric_only: bool = False, *args, **kwargs):
maybe_warn_args_and_kwargs(type(self), "median", args, kwargs)
nv.validate_resampler_func("median", args, kwargs)
return self._downsample("median", numeric_only=numeric_only)
def mean(
self,
numeric_only: bool = False,
*args,
**kwargs,
):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Mean of values within each group.
"""
maybe_warn_args_and_kwargs(type(self), "mean", args, kwargs)
nv.validate_resampler_func("mean", args, kwargs)
return self._downsample("mean", numeric_only=numeric_only)
def std(
self,
ddof: int = 1,
numeric_only: bool = False,
*args,
**kwargs,
):
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Standard deviation of values within each group.
"""
maybe_warn_args_and_kwargs(type(self), "std", args, kwargs)
nv.validate_resampler_func("std", args, kwargs)
return self._downsample("std", ddof=ddof, numeric_only=numeric_only)
def var(
self,
ddof: int = 1,
numeric_only: bool = False,
*args,
**kwargs,
):
"""
Compute variance of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Variance of values within each group.
"""
maybe_warn_args_and_kwargs(type(self), "var", args, kwargs)
nv.validate_resampler_func("var", args, kwargs)
return self._downsample("var", ddof=ddof, numeric_only=numeric_only)
def sem(
self,
ddof: int = 1,
numeric_only: bool = False,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "sem", args, kwargs)
nv.validate_resampler_func("sem", args, kwargs)
return self._downsample("sem", ddof=ddof, numeric_only=numeric_only)
def ohlc(
self,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "ohlc", args, kwargs)
nv.validate_resampler_func("ohlc", args, kwargs)
return self._downsample("ohlc")
def nunique(
self,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "nunique", args, kwargs)
nv.validate_resampler_func("nunique", args, kwargs)
return self._downsample("nunique")
def size(self):
result = self._downsample("size")
# If the result is a non-empty DataFrame we stack to get a Series
# GH 46826
if isinstance(result, ABCDataFrame) and not result.empty:
result = result.stack()
if not len(self.ax):
from pandas import Series
if self._selected_obj.ndim == 1:
name = self._selected_obj.name
else:
name = None
result = Series([], index=result.index, dtype="int64", name=name)
return result
def count(self):
result = self._downsample("count")
if not len(self.ax):
if self._selected_obj.ndim == 1:
result = type(self._selected_obj)(
[], index=result.index, dtype="int64", name=self._selected_obj.name
)
else:
from pandas import DataFrame
result = DataFrame(
[], index=result.index, columns=result.columns, dtype="int64"
)
return result
def quantile(self, q: float | AnyArrayLike = 0.5, **kwargs):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Returns
-------
DataFrame or Series
Quantile of values within each group.
See Also
--------
Series.quantile
Return a series, where the index is q and the values are the quantiles.
DataFrame.quantile
Return a DataFrame, where the columns are the columns of self,
and the values are the quantiles.
DataFrameGroupBy.quantile
Return a DataFrame, where the columns are groupby columns,
and the values are its quantiles.
"""
return self._downsample("quantile", q=q, **kwargs)
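# Illustrative sketch (not in the original source): ``q`` may be a scalar or an
# array of quantiles:
# >>> s = pd.Series([1, 2, 3, 4],
# ...               index=pd.date_range('20130101', periods=4, freq='s'))
# >>> s.resample('2s').quantile(0.5)
# 2013-01-01 00:00:00    1.5
# 2013-01-01 00:00:02    3.5
# Freq: 2S, dtype: float64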
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping.
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; 'left' or 'right'
label : interval boundary to use for labeling; 'left' or 'right'
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
"""
_attributes = Grouper._attributes + (
"closed",
"label",
"how",
"kind",
"convention",
"origin",
"offset",
)
origin: TimeGrouperOrigin
def __init__(
self,
freq: Frequency = "Min",
closed: Literal["left", "right"] | None = None,
label: Literal["left", "right"] | None = None,
how: str = "mean",
axis: Axis = 0,
fill_method=None,
limit=None,
kind: str | None = None,
convention: Literal["start", "end", "e", "s"] | None = None,
origin: Literal["epoch", "start", "start_day", "end", "end_day"]
| TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
group_keys: bool = False,
**kwargs,
) -> None:
# Check for correctness of the keyword arguments which would
# otherwise silently use the default if misspelled
if label not in {None, "left", "right"}:
raise ValueError(f"Unsupported value {label} for `label`")
if closed not in {None, "left", "right"}:
raise ValueError(f"Unsupported value {closed} for `closed`")
if convention not in {None, "start", "end", "e", "s"}:
raise ValueError(f"Unsupported value {convention} for `convention`")
freq = to_offset(freq)
end_types = {"M", "A", "Q", "BM", "BA", "BQ", "W"}
rule = freq.rule_code
if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types):
if closed is None:
closed = "right"
if label is None:
label = "right"
else:
# The backward resample sets ``closed`` to ``'right'`` by default
# since the last value should be considered as the edge point for
# the last bin. When origin in "end" or "end_day", the value for a
# specific ``Timestamp`` index stands for the resample result from
# the current ``Timestamp`` minus ``freq`` to the current
# ``Timestamp`` with a right close.
if origin in ["end", "end_day"]:
if closed is None:
closed = "right"
if label is None:
label = "right"
else:
if closed is None:
closed = "left"
if label is None:
label = "left"
self.closed = closed
self.label = label
self.kind = kind
self.convention = convention if convention is not None else "e"
self.how = how
self.fill_method = fill_method
self.limit = limit
self.group_keys = group_keys
if origin in ("epoch", "start", "start_day", "end", "end_day"):
# error: Incompatible types in assignment (expression has type "Union[Union[
# Timestamp, datetime, datetime64, signedinteger[_64Bit], float, str],
# Literal['epoch', 'start', 'start_day', 'end', 'end_day']]", variable has
# type "Union[Timestamp, Literal['epoch', 'start', 'start_day', 'end',
# 'end_day']]")
self.origin = origin # type: ignore[assignment]
else:
try:
self.origin = Timestamp(origin)
except (ValueError, TypeError) as err:
raise ValueError(
"'origin' should be equal to 'epoch', 'start', 'start_day', "
"'end', 'end_day' or "
f"should be a Timestamp convertible type. Got '{origin}' instead."
) from err
try:
self.offset = Timedelta(offset) if offset is not None else None
except (ValueError, TypeError) as err:
raise ValueError(
"'offset' should be a Timedelta convertible type. "
f"Got '{offset}' instead."
) from err
# always sort time groupers
kwargs["sort"] = True
super().__init__(freq=freq, axis=axis, **kwargs)
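# Illustrative note (not in the original source): the defaulting above means
# end-anchored rules get right-closed, right-labeled bins while other rules
# default to the left edge:
# >>> TimeGrouper(freq="M").closed, TimeGrouper(freq="M").label
# ('right', 'right')
# >>> TimeGrouper(freq="H").closed, TimeGrouper(freq="H").label
# ('left', 'left')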
def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
"""
Return my resampler or raise if we have an invalid axis.
Parameters
----------
obj : Series or DataFrame
kind : string, optional
'period','timestamp','timedelta' are valid
Returns
-------
Resampler
Raises
------
TypeError if incompatible axis
"""
_, ax, indexer = self._set_grouper(obj, gpr_index=None)
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(
obj,
timegrouper=self,
kind=kind,
axis=self.axis,
group_keys=self.group_keys,
gpr_index=ax,
)
elif isinstance(ax, PeriodIndex) or kind == "period":
return PeriodIndexResampler(
obj,
timegrouper=self,
kind=kind,
axis=self.axis,
group_keys=self.group_keys,
gpr_index=ax,
)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(
obj,
timegrouper=self,
axis=self.axis,
group_keys=self.group_keys,
gpr_index=ax,
)
raise TypeError(
"Only valid with DatetimeIndex, "
"TimedeltaIndex or PeriodIndex, "
f"but got an instance of '{type(ax).__name__}'"
)
def _get_grouper(
self, obj: NDFrameT, validate: bool = True
) -> tuple[BinGrouper, NDFrameT]:
# create the resampler and return our binner
r = self._get_resampler(obj)
return r.grouper, cast(NDFrameT, r.obj)
def _get_time_bins(self, ax: DatetimeIndex):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
f"an instance of {type(ax).__name__}"
)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = _get_timestamp_range_edges(
ax.min(),
ax.max(),
self.freq,
unit=ax.unit,
closed=self.closed,
origin=self.origin,
offset=self.offset,
)
# GH #12037
# use first/last directly instead of calling replace() on them,
# because replace() would swallow the nanosecond part; the last bin
# could then end slightly before the true end when the end contains a
# nanosecond component, leading to a `Values falls after last bin` error
# GH 25758: If DST lands at midnight (e.g. 'America/Havana'), user feedback
# has noted that ambiguous=True provides the most sensible result
binner = labels = date_range(
freq=self.freq,
start=first,
end=last,
tz=ax.tz,
name=ax.name,
ambiguous=True,
nonexistent="shift_forward",
unit=ax.unit,
)
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(
ax_values, bin_edges, self.closed, hasnans=ax.hasnans
)
if self.closed == "right":
labels = binner
if self.label == "right":
labels = labels[1:]
elif self.label == "right":
labels = labels[1:]
if ax.hasnans:
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[: len(bins)]
return binner, bins, labels
def _adjust_bin_edges(
self, binner: DatetimeIndex, ax_values: npt.NDArray[np.int64]
) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]:
# Some hacks for > daily data, see #1471, #1458, #1483
if self.freq != "D" and is_superperiod(self.freq, "D"):
if self.closed == "right":
# GH 21459, GH 9119: Adjust the bins relative to the wall time
edges_dti = binner.tz_localize(None)
edges_dti = (
edges_dti
+ Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
- Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
)
bin_edges = edges_dti.tz_localize(binner.tz).asi8
else:
bin_edges = binner.asi8
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
else:
bin_edges = binner.asi8
return binner, bin_edges
def _get_time_delta_bins(self, ax: TimedeltaIndex):
if not isinstance(ax, TimedeltaIndex):
raise TypeError(
"axis must be a TimedeltaIndex, but got "
f"an instance of {type(ax).__name__}"
)
if not len(ax):
binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start, end = ax.min(), ax.max()
if self.closed == "right":
end += self.freq
labels = binner = timedelta_range(
start=start, end=end, freq=self.freq, name=ax.name
)
end_stamps = labels
if self.closed == "left":
end_stamps += self.freq
bins = ax.searchsorted(end_stamps, side=self.closed)
if self.offset:
# GH 10530 & 31809
labels += self.offset
return binner, bins, labels
def _get_time_period_bins(self, ax: DatetimeIndex):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
f"an instance of {type(ax).__name__}"
)
freq = self.freq
if not len(ax):
binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name)
return binner, [], labels
labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)
end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
if ax.tz:
end_stamps = end_stamps.tz_localize(ax.tz)
bins = ax.searchsorted(end_stamps, side="left")
return binner, bins, labels
def _get_period_bins(self, ax: PeriodIndex):
if not isinstance(ax, PeriodIndex):
raise TypeError(
"axis must be a PeriodIndex, but got "
f"an instance of {type(ax).__name__}"
)
memb = ax.asfreq(self.freq, how=self.convention)
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
nat_count = 0
if memb.hasnans:
# error: Incompatible types in assignment (expression has type
# "bool_", variable has type "int") [assignment]
nat_count = np.sum(memb._isnan) # type: ignore[assignment]
memb = memb[~memb._isnan]
if not len(memb):
# index contains no valid (non-NaT) values
bins = np.array([], dtype=np.int64)
binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
if len(ax) > 0:
# index is all NaT
binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax))
return binner, bins, labels
freq_mult = self.freq.n
start = ax.min().asfreq(self.freq, how=self.convention)
end = ax.max().asfreq(self.freq, how="end")
bin_shift = 0
if isinstance(self.freq, Tick):
# GH 23882 & 31809: get adjusted bin edge labels with 'origin'
# and 'offset' support. This call only makes sense if the freq is a
# Tick since offset and origin are only used in those cases.
# Not doing this check could create an extra empty bin.
p_start, end = _get_period_range_edges(
start,
end,
self.freq,
closed=self.closed,
origin=self.origin,
offset=self.offset,
)
# Get offset for bin edge (not label edge) adjustment
start_offset = Period(start, self.freq) - Period(p_start, self.freq)
# error: Item "Period" of "Union[Period, Any]" has no attribute "n"
bin_shift = start_offset.n % freq_mult # type: ignore[union-attr]
start = p_start
labels = binner = period_range(
start=start, end=end, freq=self.freq, name=ax.name
)
i8 = memb.asi8
# when upsampling to subperiods, we need to generate enough bins
expected_bins_count = len(binner) * freq_mult
i8_extend = expected_bins_count - (i8[-1] - i8[0])
rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)
rng += freq_mult
# adjust bin edge indexes to account for base
rng -= bin_shift
# Wrap in PeriodArray for PeriodArray.searchsorted
prng = type(memb._data)(rng, dtype=memb.dtype)
bins = memb.searchsorted(prng, side="left")
if nat_count > 0:
binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count)
return binner, bins, labels
The provided code snippet includes necessary dependencies for implementing the `get_resampler` function. Write a Python function `def get_resampler(obj: Series | DataFrame, kind=None, **kwds) -> Resampler` to solve the following problem:
Create a TimeGrouper and return our resampler.
Here is the function:
def get_resampler(obj: Series | DataFrame, kind=None, **kwds) -> Resampler:
"""
Create a TimeGrouper and return our resampler.
"""
tg = TimeGrouper(**kwds)
return tg._get_resampler(obj, kind=kind) | Create a TimeGrouper and return our resampler. |
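A brief usage sketch (illustrative only; it assumes the public pandas API): `get_resampler` is the internal entry point that `Series.resample` / `DataFrame.resample` ultimately reach, so going through `resample` exercises the same path as constructing the TimeGrouper directly.

import pandas as pd

s = pd.Series(range(4), index=pd.date_range("2013-01-01", periods=4, freq="s"))

# Public API: builds a TimeGrouper from the keyword arguments and returns a Resampler.
r = s.resample("2s")
print(r.sum())

# Hypothetical direct call to the internal helper shown above (normally not needed):
# r = get_resampler(s, freq="2s")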
173,234 | from __future__ import annotations
import copy
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
Literal,
cast,
final,
no_type_check,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
Period,
Timedelta,
Timestamp,
to_offset,
)
from pandas._typing import (
AnyArrayLike,
Axis,
AxisInt,
Frequency,
IndexLabel,
NDFrameT,
QuantileInterpolation,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvertibleTypes,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.algorithms as algos
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.groupby.groupby import (
BaseGroupBy,
GroupBy,
_pipe_template,
get_groupby,
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
)
from pandas.core.indexes.period import (
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import (
TimedeltaIndex,
timedelta_range,
)
from pandas.tseries.frequencies import (
is_subperiod,
is_superperiod,
)
from pandas.tseries.offsets import (
Day,
Tick,
)
class Resampler(BaseGroupBy, PandasObject):
"""
Class for resampling datetimelike data, a groupby-like operation.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.resample(...) to use Resampler.
Parameters
----------
obj : Series or DataFrame
groupby : TimeGrouper
axis : int, default 0
kind : str or None
'period', 'timestamp' to override default index treatment
Returns
-------
a Resampler of the appropriate type
Notes
-----
After resampling, see aggregate, apply, and transform functions.
"""
grouper: BinGrouper
_timegrouper: TimeGrouper
binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
_internal_names_set = set({"obj", "ax", "_indexer"})
# to the groupby descriptor
_attributes = [
"freq",
"axis",
"closed",
"label",
"convention",
"kind",
"origin",
"offset",
]
def __init__(
self,
obj: NDFrame,
timegrouper: TimeGrouper,
axis: Axis = 0,
kind=None,
*,
gpr_index: Index,
group_keys: bool = False,
selection=None,
) -> None:
self._timegrouper = timegrouper
self.keys = None
self.sort = True
self.axis = obj._get_axis_number(axis)
self.kind = kind
self.group_keys = group_keys
self.as_index = True
self.obj, self.ax, self._indexer = self._timegrouper._set_grouper(
self._convert_obj(obj), sort=True, gpr_index=gpr_index
)
self.binner, self.grouper = self._get_binner()
self._selection = selection
if self._timegrouper.key is not None:
self.exclusions = frozenset([self._timegrouper.key])
else:
self.exclusions = frozenset()
def __str__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
f"{k}={getattr(self._timegrouper, k)}"
for k in self._attributes
if getattr(self._timegrouper, k, None) is not None
)
return f"{type(self).__name__} [{', '.join(attrs)}]"
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
return getattr(self._timegrouper, attr)
if attr in self.obj:
return self[attr]
return object.__getattribute__(self, attr)
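# Illustrative sketch (not in the original source): attribute lookup is delegated
# in the order above, e.g. for a resampler over a DataFrame with a column 'A':
# >>> df = pd.DataFrame({'A': [1, 2]},
# ...                   index=pd.date_range('20130101', periods=2, freq='s'))
# >>> r = df.resample('2s')
# >>> r.freq   # listed in _attributes -> forwarded to the underlying TimeGrouper
# <2 * Seconds>
# >>> r.A      # found in r.obj -> equivalent to r['A'], selecting that column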
def _from_selection(self) -> bool:
"""
Is the resampling from a DataFrame column or MultiIndex level.
"""
# upsampling and PeriodIndex resampling do not work
# with selection, this state used to catch and raise an error
return self._timegrouper is not None and (
self._timegrouper.key is not None or self._timegrouper.level is not None
)
def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
"""
Provide any conversions for the object in order to correctly handle.
Parameters
----------
obj : Series or DataFrame
Returns
-------
Series or DataFrame
"""
return obj._consolidate()
def _get_binner_for_time(self):
raise AbstractMethodError(self)
def _get_binner(self):
"""
Create the BinGrouper, assume that self.set_grouper(obj)
has already been called.
"""
binner, bins, binlabels = self._get_binner_for_time()
assert len(bins) == len(binlabels)
bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer)
return binner, bin_grouper
@Substitution(
klass="Resampler",
examples="""
>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
... index=pd.date_range('2012-08-02', periods=4))
>>> df
A
2012-08-02 1
2012-08-03 2
2012-08-04 3
2012-08-05 4
To get the difference between each 2-day period's maximum and minimum
value in one pass, you can do
>>> df.resample('2D').pipe(lambda x: x.max() - x.min())
A
2012-08-02 1
2012-08-04 1""",
)
@Appender(_pipe_template)
def pipe(
self,
func: Callable[..., T] | tuple[Callable[..., T], str],
*args,
**kwargs,
) -> T:
return super().pipe(func, *args, **kwargs)
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.groupby.aggregate : Aggregate using callable, string, dict,
or list of string/callables.
DataFrame.resample.transform : Transforms the Series on each group
based on the given function.
DataFrame.aggregate: Aggregate using one or more
operations over the specified axis.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5],
... index=pd.date_range('20130101', periods=5, freq='s'))
>>> s
2013-01-01 00:00:00 1
2013-01-01 00:00:01 2
2013-01-01 00:00:02 3
2013-01-01 00:00:03 4
2013-01-01 00:00:04 5
Freq: S, dtype: int64
>>> r = s.resample('2s')
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
2013-01-01 00:00:02 7
2013-01-01 00:00:04 5
Freq: 2S, dtype: int64
>>> r.agg(['sum', 'mean', 'max'])
sum mean max
2013-01-01 00:00:00 3 1.5 2
2013-01-01 00:00:02 7 3.5 4
2013-01-01 00:00:04 5 5.0 5
>>> r.agg({'result': lambda x: x.mean() / x.std(),
... 'total': np.sum})
result total
2013-01-01 00:00:00 2.121320 3
2013-01-01 00:00:02 4.949747 7
2013-01-01 00:00:04 NaN 5
>>> r.agg(average="mean", total="sum")
average total
2013-01-01 00:00:00 1.5 3
2013-01-01 00:00:02 3.5 7
2013-01-01 00:00:04 5.0 5
"""
)
@doc(
_shared_docs["aggregate"],
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
klass="DataFrame",
axis="",
)
def aggregate(self, func=None, *args, **kwargs):
result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
if result is None:
how = func
result = self._groupby_and_aggregate(how, *args, **kwargs)
return result
agg = aggregate
apply = aggregate
def transform(self, arg, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group.
Return a Series with the transformed values.
Parameters
----------
arg : function
To apply to each group. Should return a Series with the same index.
Returns
-------
Series
Examples
--------
>>> s = pd.Series([1, 2],
... index=pd.date_range('20180101',
... periods=2,
... freq='1h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: H, dtype: int64
>>> resampled = s.resample('15min')
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
2018-01-01 00:00:00 NaN
2018-01-01 01:00:00 NaN
Freq: H, dtype: float64
"""
return self._selected_obj.groupby(self._timegrouper).transform(
arg, *args, **kwargs
)
def _downsample(self, f, **kwargs):
raise AbstractMethodError(self)
def _upsample(self, f, limit=None, fill_value=None):
raise AbstractMethodError(self)
def _gotitem(self, key, ndim: int, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
grouper = self.grouper
if subset is None:
subset = self.obj
if key is not None:
subset = subset[key]
else:
# reached via Apply.agg_dict_like with selection=None and ndim=1
assert subset.ndim == 1
if ndim == 1:
assert subset.ndim == 1
grouped = get_groupby(
subset, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
)
return grouped
def _groupby_and_aggregate(self, how, *args, **kwargs):
"""
Re-evaluate the obj with a groupby aggregation.
"""
grouper = self.grouper
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
# Excludes `on` column when provided
obj = self._obj_with_exclusions
grouped = get_groupby(
obj, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
)
try:
if callable(how):
# TODO: test_resample_apply_with_additional_args fails if we go
# through the non-lambda path, not clear that it should.
func = lambda x: how(x, *args, **kwargs)
result = grouped.aggregate(func)
else:
result = grouped.aggregate(how, *args, **kwargs)
except (AttributeError, KeyError):
# we have a non-reducing function; try to evaluate
# alternatively we want to evaluate only a column of the input
# test_apply_to_one_column_of_df the function being applied references
# a DataFrame column, but aggregate_item_by_item operates column-wise
# on Series, raising AttributeError or KeyError
# (depending on whether the column lookup uses getattr/__getitem__)
result = grouped.apply(how, *args, **kwargs)
except ValueError as err:
if "Must produce aggregated value" in str(err):
# raised in _aggregate_named
# see test_apply_without_aggregation, test_apply_with_mutated_index
pass
else:
raise
# we have a non-reducing function
# try to evaluate
result = grouped.apply(how, *args, **kwargs)
return self._wrap_result(result)
def _get_resampler_for_grouping(self, groupby: GroupBy, key):
"""
Return the correct class for resampling with groupby.
"""
return self._resampler_for_grouping(groupby=groupby, key=key, parent=self)
def _wrap_result(self, result):
"""
Potentially wrap any results.
"""
# GH 47705
obj = self.obj
if (
isinstance(result, ABCDataFrame)
and result.empty
and not isinstance(result.index, PeriodIndex)
):
result = result.set_index(
_asfreq_compat(obj.index[:0], freq=self.freq), append=True
)
if isinstance(result, ABCSeries) and self._selection is not None:
result.name = self._selection
if isinstance(result, ABCSeries) and result.empty:
# When index is all NaT, result is empty but index is not
result.index = _asfreq_compat(obj.index[:0], freq=self.freq)
result.name = getattr(obj, "name", None)
return result
def ffill(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
An upsampled Series.
See Also
--------
Series.fillna: Fill NA/NaN values using the specified method.
DataFrame.fillna: Fill NA/NaN values using the specified method.
"""
return self._upsample("ffill", limit=limit)
def nearest(self, limit=None):
"""
Resample by using the nearest value.
When resampling data, missing values may appear (e.g., when the
resampling frequency is higher than the original frequency).
The `nearest` method will replace ``NaN`` values that appeared in
the resampled data with the value from the nearest member of the
sequence, based on the index value.
Missing values that existed in the original data will not be modified.
If `limit` is given, fill only this many values in each direction for
each of the original values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with ``NaN`` values filled with
their nearest value.
See Also
--------
backfill : Backward fill the new missing values in the resampled data.
pad : Forward fill ``NaN`` values.
Examples
--------
>>> s = pd.Series([1, 2],
... index=pd.date_range('20180101',
... periods=2,
... freq='1h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: H, dtype: int64
>>> s.resample('15min').nearest()
2018-01-01 00:00:00 1
2018-01-01 00:15:00 1
2018-01-01 00:30:00 2
2018-01-01 00:45:00 2
2018-01-01 01:00:00 2
Freq: 15T, dtype: int64
Limit the number of upsampled values imputed by the nearest:
>>> s.resample('15min').nearest(limit=1)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
Freq: 15T, dtype: float64
"""
return self._upsample("nearest", limit=limit)
def bfill(self, limit=None):
"""
Backward fill the new missing values in the resampled data.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency). The backward fill will replace NaN values that appeared in
the resampled data with the next value in the original sequence.
Missing values that existed in the original data will not be modified.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series, DataFrame
An upsampled Series or DataFrame with backward filled NaN values.
See Also
--------
bfill : Alias of backfill.
fillna : Fill NaN values using the specified method, which can be
'backfill'.
nearest : Fill NaN values with nearest neighbor starting from center.
ffill : Forward fill NaN values.
Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'backfill'.
DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'backfill'.
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
>>> s.resample('30min').bfill()
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').bfill(limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
Resampling a DataFrame that has missing values:
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').bfill()
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
>>> df.resample('15min').bfill(limit=2)
a b
2018-01-01 00:00:00 2.0 1.0
2018-01-01 00:15:00 NaN NaN
2018-01-01 00:30:00 NaN 3.0
2018-01-01 00:45:00 NaN 3.0
2018-01-01 01:00:00 NaN 3.0
2018-01-01 01:15:00 NaN NaN
2018-01-01 01:30:00 6.0 5.0
2018-01-01 01:45:00 6.0 5.0
2018-01-01 02:00:00 6.0 5.0
"""
return self._upsample("bfill", limit=limit)
def fillna(self, method, limit=None):
"""
Fill missing values introduced by upsampling.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency).
Missing values that existed in the original data will
not be modified.
Parameters
----------
method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
Method to use for filling holes in resampled data
* 'pad' or 'ffill': use previous valid observation to fill gap
(forward fill).
* 'backfill' or 'bfill': use next valid observation to fill gap.
* 'nearest': use nearest valid observation to fill gap.
limit : int, optional
Limit of how many consecutive missing values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with missing values filled.
See Also
--------
bfill : Backward fill NaN values in the resampled data.
ffill : Forward fill NaN values in the resampled data.
nearest : Fill NaN values in the resampled data
with nearest neighbor starting from center.
interpolate : Fill NaN values using interpolation.
Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'bfill' and 'ffill'.
DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'bfill' and 'ffill'.
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
Without filling the missing values you get:
>>> s.resample("30min").asfreq()
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 2.0
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> s.resample('30min').fillna("backfill")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').fillna("backfill", limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
>>> s.resample('30min').fillna("pad")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 1
2018-01-01 01:00:00 2
2018-01-01 01:30:00 2
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('30min').fillna("nearest")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
Missing values present before the upsampling are not affected.
>>> sm = pd.Series([1, None, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> sm
2018-01-01 00:00:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 02:00:00 3.0
Freq: H, dtype: float64
>>> sm.resample('30min').fillna('backfill')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('pad')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('nearest')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
DataFrame resampling is done column-wise. All the same options are
available.
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').fillna("bfill")
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
"""
return self._upsample(method, limit=limit)
def interpolate(
self,
method: QuantileInterpolation = "linear",
*,
axis: Axis = 0,
limit=None,
inplace: bool = False,
limit_direction: Literal["forward", "backward", "both"] = "forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
result = self._upsample("asfreq")
return result.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def asfreq(self, fill_value=None):
"""
Return the values at the new freq, essentially a reindex.
Parameters
----------
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
DataFrame or Series
Values at the specified freq.
See Also
--------
Series.asfreq: Convert TimeSeries to specified frequency.
DataFrame.asfreq: Convert TimeSeries to specified frequency.
"""
return self._upsample("asfreq", fill_value=fill_value)
def sum(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs)
nv.validate_resampler_func("sum", args, kwargs)
return self._downsample("sum", numeric_only=numeric_only, min_count=min_count)
def prod(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "prod", args, kwargs)
nv.validate_resampler_func("prod", args, kwargs)
return self._downsample("prod", numeric_only=numeric_only, min_count=min_count)
def min(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "min", args, kwargs)
nv.validate_resampler_func("min", args, kwargs)
return self._downsample("min", numeric_only=numeric_only, min_count=min_count)
def max(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "max", args, kwargs)
nv.validate_resampler_func("max", args, kwargs)
return self._downsample("max", numeric_only=numeric_only, min_count=min_count)
def first(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "first", args, kwargs)
nv.validate_resampler_func("first", args, kwargs)
return self._downsample("first", numeric_only=numeric_only, min_count=min_count)
def last(
self,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "last", args, kwargs)
nv.validate_resampler_func("last", args, kwargs)
return self._downsample("last", numeric_only=numeric_only, min_count=min_count)
def median(self, numeric_only: bool = False, *args, **kwargs):
maybe_warn_args_and_kwargs(type(self), "median", args, kwargs)
nv.validate_resampler_func("median", args, kwargs)
return self._downsample("median", numeric_only=numeric_only)
def mean(
self,
numeric_only: bool = False,
*args,
**kwargs,
):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Mean of values within each group.
"""
maybe_warn_args_and_kwargs(type(self), "mean", args, kwargs)
nv.validate_resampler_func("mean", args, kwargs)
return self._downsample("mean", numeric_only=numeric_only)
def std(
self,
ddof: int = 1,
numeric_only: bool = False,
*args,
**kwargs,
):
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Standard deviation of values within each group.
"""
maybe_warn_args_and_kwargs(type(self), "std", args, kwargs)
nv.validate_resampler_func("std", args, kwargs)
return self._downsample("std", ddof=ddof, numeric_only=numeric_only)
def var(
self,
ddof: int = 1,
numeric_only: bool = False,
*args,
**kwargs,
):
"""
Compute variance of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Variance of values within each group.
"""
maybe_warn_args_and_kwargs(type(self), "var", args, kwargs)
nv.validate_resampler_func("var", args, kwargs)
return self._downsample("var", ddof=ddof, numeric_only=numeric_only)
def sem(
self,
ddof: int = 1,
numeric_only: bool = False,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "sem", args, kwargs)
nv.validate_resampler_func("sem", args, kwargs)
return self._downsample("sem", ddof=ddof, numeric_only=numeric_only)
def ohlc(
self,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "ohlc", args, kwargs)
nv.validate_resampler_func("ohlc", args, kwargs)
return self._downsample("ohlc")
def nunique(
self,
*args,
**kwargs,
):
maybe_warn_args_and_kwargs(type(self), "nunique", args, kwargs)
nv.validate_resampler_func("nunique", args, kwargs)
return self._downsample("nunique")
def size(self):
result = self._downsample("size")
# If the result is a non-empty DataFrame we stack to get a Series
# GH 46826
if isinstance(result, ABCDataFrame) and not result.empty:
result = result.stack()
if not len(self.ax):
from pandas import Series
if self._selected_obj.ndim == 1:
name = self._selected_obj.name
else:
name = None
result = Series([], index=result.index, dtype="int64", name=name)
return result
def count(self):
result = self._downsample("count")
if not len(self.ax):
if self._selected_obj.ndim == 1:
result = type(self._selected_obj)(
[], index=result.index, dtype="int64", name=self._selected_obj.name
)
else:
from pandas import DataFrame
result = DataFrame(
[], index=result.index, columns=result.columns, dtype="int64"
)
return result
def quantile(self, q: float | AnyArrayLike = 0.5, **kwargs):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Returns
-------
DataFrame or Series
Quantile of values within each group.
See Also
--------
Series.quantile
Return a series, where the index is q and the values are the quantiles.
DataFrame.quantile
Return a DataFrame, where the columns are the columns of self,
and the values are the quantiles.
DataFrameGroupBy.quantile
Return a DataFrame, where the columns are groupby columns,
and the values are its quantiles.
"""
return self._downsample("quantile", q=q, **kwargs)
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping.
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; 'left' or 'right'
label : interval boundary to use for labeling; 'left' or 'right'
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
"""
_attributes = Grouper._attributes + (
"closed",
"label",
"how",
"kind",
"convention",
"origin",
"offset",
)
origin: TimeGrouperOrigin
def __init__(
self,
freq: Frequency = "Min",
closed: Literal["left", "right"] | None = None,
label: Literal["left", "right"] | None = None,
how: str = "mean",
axis: Axis = 0,
fill_method=None,
limit=None,
kind: str | None = None,
convention: Literal["start", "end", "e", "s"] | None = None,
origin: Literal["epoch", "start", "start_day", "end", "end_day"]
| TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
group_keys: bool = False,
**kwargs,
) -> None:
# Check for correctness of the keyword arguments which would
# otherwise silently use the default if misspelled
if label not in {None, "left", "right"}:
raise ValueError(f"Unsupported value {label} for `label`")
if closed not in {None, "left", "right"}:
raise ValueError(f"Unsupported value {closed} for `closed`")
if convention not in {None, "start", "end", "e", "s"}:
raise ValueError(f"Unsupported value {convention} for `convention`")
freq = to_offset(freq)
end_types = {"M", "A", "Q", "BM", "BA", "BQ", "W"}
rule = freq.rule_code
if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types):
if closed is None:
closed = "right"
if label is None:
label = "right"
else:
# The backward resample sets ``closed`` to ``'right'`` by default
# since the last value should be considered as the edge point for
# the last bin. When origin in "end" or "end_day", the value for a
# specific ``Timestamp`` index stands for the resample result from
# the current ``Timestamp`` minus ``freq`` to the current
# ``Timestamp`` with a right close.
if origin in ["end", "end_day"]:
if closed is None:
closed = "right"
if label is None:
label = "right"
else:
if closed is None:
closed = "left"
if label is None:
label = "left"
self.closed = closed
self.label = label
self.kind = kind
self.convention = convention if convention is not None else "e"
self.how = how
self.fill_method = fill_method
self.limit = limit
self.group_keys = group_keys
if origin in ("epoch", "start", "start_day", "end", "end_day"):
# error: Incompatible types in assignment (expression has type "Union[Union[
# Timestamp, datetime, datetime64, signedinteger[_64Bit], float, str],
# Literal['epoch', 'start', 'start_day', 'end', 'end_day']]", variable has
# type "Union[Timestamp, Literal['epoch', 'start', 'start_day', 'end',
# 'end_day']]")
self.origin = origin # type: ignore[assignment]
else:
try:
self.origin = Timestamp(origin)
except (ValueError, TypeError) as err:
raise ValueError(
"'origin' should be equal to 'epoch', 'start', 'start_day', "
"'end', 'end_day' or "
f"should be a Timestamp convertible type. Got '{origin}' instead."
) from err
try:
self.offset = Timedelta(offset) if offset is not None else None
except (ValueError, TypeError) as err:
raise ValueError(
"'offset' should be a Timedelta convertible type. "
f"Got '{offset}' instead."
) from err
# always sort time groupers
kwargs["sort"] = True
super().__init__(freq=freq, axis=axis, **kwargs)
def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
"""
Return my resampler or raise if we have an invalid axis.
Parameters
----------
obj : Series or DataFrame
kind : string, optional
'period','timestamp','timedelta' are valid
Returns
-------
Resampler
Raises
------
TypeError if incompatible axis
"""
_, ax, indexer = self._set_grouper(obj, gpr_index=None)
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(
obj,
timegrouper=self,
kind=kind,
axis=self.axis,
group_keys=self.group_keys,
gpr_index=ax,
)
elif isinstance(ax, PeriodIndex) or kind == "period":
return PeriodIndexResampler(
obj,
timegrouper=self,
kind=kind,
axis=self.axis,
group_keys=self.group_keys,
gpr_index=ax,
)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(
obj,
timegrouper=self,
axis=self.axis,
group_keys=self.group_keys,
gpr_index=ax,
)
raise TypeError(
"Only valid with DatetimeIndex, "
"TimedeltaIndex or PeriodIndex, "
f"but got an instance of '{type(ax).__name__}'"
)
def _get_grouper(
self, obj: NDFrameT, validate: bool = True
) -> tuple[BinGrouper, NDFrameT]:
# create the resampler and return our binner
r = self._get_resampler(obj)
return r.grouper, cast(NDFrameT, r.obj)
def _get_time_bins(self, ax: DatetimeIndex):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
f"an instance of {type(ax).__name__}"
)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = _get_timestamp_range_edges(
ax.min(),
ax.max(),
self.freq,
unit=ax.unit,
closed=self.closed,
origin=self.origin,
offset=self.offset,
)
# GH #12037
# use first/last directly instead of calling replace() on them,
# because replace() would swallow the nanosecond part; the last bin
# could then end slightly before the true end when the end contains a
# nanosecond component, leading to a `Values falls after last bin` error
# GH 25758: If DST lands at midnight (e.g. 'America/Havana'), user feedback
# has noted that ambiguous=True provides the most sensible result
binner = labels = date_range(
freq=self.freq,
start=first,
end=last,
tz=ax.tz,
name=ax.name,
ambiguous=True,
nonexistent="shift_forward",
unit=ax.unit,
)
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(
ax_values, bin_edges, self.closed, hasnans=ax.hasnans
)
if self.closed == "right":
labels = binner
if self.label == "right":
labels = labels[1:]
elif self.label == "right":
labels = labels[1:]
if ax.hasnans:
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[: len(bins)]
return binner, bins, labels
def _adjust_bin_edges(
self, binner: DatetimeIndex, ax_values: npt.NDArray[np.int64]
) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]:
# Some hacks for > daily data, see #1471, #1458, #1483
if self.freq != "D" and is_superperiod(self.freq, "D"):
if self.closed == "right":
# GH 21459, GH 9119: Adjust the bins relative to the wall time
edges_dti = binner.tz_localize(None)
edges_dti = (
edges_dti
+ Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
- Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
)
bin_edges = edges_dti.tz_localize(binner.tz).asi8
else:
bin_edges = binner.asi8
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
else:
bin_edges = binner.asi8
return binner, bin_edges
def _get_time_delta_bins(self, ax: TimedeltaIndex):
if not isinstance(ax, TimedeltaIndex):
raise TypeError(
"axis must be a TimedeltaIndex, but got "
f"an instance of {type(ax).__name__}"
)
if not len(ax):
binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start, end = ax.min(), ax.max()
if self.closed == "right":
end += self.freq
labels = binner = timedelta_range(
start=start, end=end, freq=self.freq, name=ax.name
)
end_stamps = labels
if self.closed == "left":
end_stamps += self.freq
bins = ax.searchsorted(end_stamps, side=self.closed)
if self.offset:
# GH 10530 & 31809
labels += self.offset
return binner, bins, labels
def _get_time_period_bins(self, ax: DatetimeIndex):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
f"an instance of {type(ax).__name__}"
)
freq = self.freq
if not len(ax):
binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name)
return binner, [], labels
labels = binner = period_range(
    start=ax[0], end=ax[-1], freq=freq, name=ax.name
)
end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
if ax.tz:
end_stamps = end_stamps.tz_localize(ax.tz)
bins = ax.searchsorted(end_stamps, side="left")
return binner, bins, labels
def _get_period_bins(self, ax: PeriodIndex):
if not isinstance(ax, PeriodIndex):
raise TypeError(
"axis must be a PeriodIndex, but got "
f"an instance of {type(ax).__name__}"
)
memb = ax.asfreq(self.freq, how=self.convention)
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
nat_count = 0
if memb.hasnans:
# error: Incompatible types in assignment (expression has type
# "bool_", variable has type "int") [assignment]
nat_count = np.sum(memb._isnan) # type: ignore[assignment]
memb = memb[~memb._isnan]
if not len(memb):
# index contains no valid (non-NaT) values
bins = np.array([], dtype=np.int64)
binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
if len(ax) > 0:
# index is all NaT
binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax))
return binner, bins, labels
freq_mult = self.freq.n
start = ax.min().asfreq(self.freq, how=self.convention)
end = ax.max().asfreq(self.freq, how="end")
bin_shift = 0
if isinstance(self.freq, Tick):
# GH 23882 & 31809: get adjusted bin edge labels with 'origin'
# and 'offset' support. This call only makes sense if the freq is a
# Tick since offset and origin are only used in those cases.
# Not doing this check could create an extra empty bin.
p_start, end = _get_period_range_edges(
start,
end,
self.freq,
closed=self.closed,
origin=self.origin,
offset=self.offset,
)
# Get offset for bin edge (not label edge) adjustment
start_offset = Period(start, self.freq) - Period(p_start, self.freq)
# error: Item "Period" of "Union[Period, Any]" has no attribute "n"
bin_shift = start_offset.n % freq_mult # type: ignore[union-attr]
start = p_start
labels = binner = period_range(
start=start, end=end, freq=self.freq, name=ax.name
)
i8 = memb.asi8
# when upsampling to subperiods, we need to generate enough bins
expected_bins_count = len(binner) * freq_mult
i8_extend = expected_bins_count - (i8[-1] - i8[0])
rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)
rng += freq_mult
# adjust bin edge indexes to account for base
rng -= bin_shift
# Wrap in PeriodArray for PeriodArray.searchsorted
prng = type(memb._data)(rng, dtype=memb.dtype)
bins = memb.searchsorted(prng, side="left")
if nat_count > 0:
binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count)
return binner, bins, labels
class GroupBy(BaseGroupBy[NDFrameT]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more.
"""
grouper: ops.BaseGrouper
as_index: bool
def __init__(
self,
obj: NDFrameT,
keys: _KeysArgType | None = None,
axis: Axis = 0,
level: IndexLabel | None = None,
grouper: ops.BaseGrouper | None = None,
exclusions: frozenset[Hashable] | None = None,
selection: IndexLabel | None = None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
observed: bool = False,
dropna: bool = True,
) -> None:
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
self.level = level
if not as_index:
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.observed = observed
self.dropna = dropna
if grouper is None:
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = frozenset(exclusions) if exclusions else frozenset()
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
def _op_via_apply(self, name: str, *args, **kwargs):
"""Compute the result of an operation by using GroupBy's apply."""
f = getattr(type(self._obj_with_exclusions), name)
sig = inspect.signature(f)
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None or kwargs.get("axis") is lib.no_default:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
is_transform = name in base.transformation_kernels
result = self._python_apply_general(
curried,
self._obj_with_exclusions,
is_transform=is_transform,
not_indexed_same=not is_transform,
)
if self.grouper.has_dropped_na and is_transform:
# result will have dropped rows due to nans, fill with null
# and ensure index is ordered same as the input
result = self._set_result_index_ordered(result)
return result
# -----------------------------------------------------------------
# Selection
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
# -----------------------------------------------------------------
# Dispatch/Wrapping
def _concat_objects(
self,
values,
not_indexed_same: bool = False,
is_transform: bool = False,
):
from pandas.core.reshape.concat import concat
if self.group_keys and not is_transform:
if self.as_index:
# possible MI return case
group_keys = self.grouper.result_index
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
elif not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if self.dropna:
labels = self.grouper.group_info[0]
mask = labels != -1
ax = ax[mask]
# this is a very unfortunate situation
# we can't use reindex to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
# TODO: can we re-use e.g. _reindex_non_unique?
if ax.has_duplicates and not result.axes[self.axis].equals(ax):
# e.g. test_category_order_transformer
target = algorithms.unique1d(ax._values)
indexer, _ = result.index.get_indexer_non_unique(target)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis, copy=False)
else:
result = concat(values, axis=self.axis)
name = self.obj.name if self.obj.ndim == 1 else self._selection
if isinstance(result, Series) and name is not None:
result.name = name
return result
def _set_result_index_ordered(
self, result: OutputFrameOrSeries
) -> OutputFrameOrSeries:
# set the result index on the passed values object and
# return the new object, xref 8046
obj_axis = self.obj._get_axis(self.axis)
if self.grouper.is_monotonic and not self.grouper.has_dropped_na:
# shortcut if we have an already ordered grouper
result = result.set_axis(obj_axis, axis=self.axis, copy=False)
return result
# row order is scrambled => sort the rows by position in original index
original_positions = Index(self.grouper.result_ilocs())
result = result.set_axis(original_positions, axis=self.axis, copy=False)
result = result.sort_index(axis=self.axis)
if self.grouper.has_dropped_na:
# Add back in any missing rows due to dropna - index here is integral
# with values referring to the row of the input so can use RangeIndex
result = result.reindex(RangeIndex(len(obj_axis)), axis=self.axis)
result = result.set_axis(obj_axis, axis=self.axis, copy=False)
return result
def _insert_inaxis_grouper(self, result: Series | DataFrame) -> DataFrame:
if isinstance(result, Series):
result = result.to_frame()
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
return result
def _indexed_output_to_ndframe(
self, result: Mapping[base.OutputKey, ArrayLike]
) -> Series | DataFrame:
raise AbstractMethodError(self)
def _maybe_transpose_result(self, result: NDFrameT) -> NDFrameT:
if self.axis == 1:
# Only relevant for DataFrameGroupBy, no-op for SeriesGroupBy
result = result.T
if result.index.equals(self.obj.index):
# Retain e.g. DatetimeIndex/TimedeltaIndex freq
# e.g. test_groupby_crash_on_nunique
result.index = self.obj.index.copy()
return result
def _wrap_aggregated_output(
self,
result: Series | DataFrame,
qs: npt.NDArray[np.float64] | None = None,
):
"""
Wraps the output of GroupBy aggregations into the expected result.
Parameters
----------
result : Series, DataFrame
Returns
-------
Series or DataFrame
"""
# ATM we do not get here for SeriesGroupBy; when we do, we will
# need to require that result.name already match self.obj.name
if not self.as_index:
# `not self.as_index` is only relevant for DataFrameGroupBy,
# enforced in __init__
result = self._insert_inaxis_grouper(result)
result = result._consolidate()
index = Index(range(self.grouper.ngroups))
else:
index = self.grouper.result_index
if qs is not None:
# We get here with len(qs) != 1 and not self.as_index
# in test_pass_args_kwargs
index = _insert_quantile_level(index, qs)
result.index = index
# error: Argument 1 to "_maybe_transpose_result" of "GroupBy" has
# incompatible type "Union[Series, DataFrame]"; expected "NDFrameT"
res = self._maybe_transpose_result(result) # type: ignore[arg-type]
return self._reindex_output(res, qs=qs)
def _wrap_applied_output(
self,
data,
values: list,
not_indexed_same: bool = False,
is_transform: bool = False,
):
raise AbstractMethodError(self)
# -----------------------------------------------------------------
# numba
def _numba_prep(self, data: DataFrame):
ids, _, ngroups = self.grouper.group_info
sorted_index = get_group_index_sorter(ids, ngroups)
sorted_ids = algorithms.take_nd(ids, sorted_index, allow_fill=False)
sorted_data = data.take(sorted_index, axis=self.axis).to_numpy()
if len(self.grouper.groupings) > 1:
raise NotImplementedError(
"More than 1 grouping labels are not supported with engine='numba'"
)
# GH 46867
index_data = data.index
if isinstance(index_data, MultiIndex):
group_key = self.grouper.groupings[0].name
index_data = index_data.get_level_values(group_key)
sorted_index_data = index_data.take(sorted_index).to_numpy()
starts, ends = lib.generate_slices(sorted_ids, ngroups)
return (
starts,
ends,
sorted_index_data,
sorted_data,
)
def _numba_agg_general(
self,
func: Callable,
engine_kwargs: dict[str, bool] | None,
*aggregator_args,
):
"""
Perform a groupby aggregation with a standard numerical aggregation
function (e.g. mean) using Numba.
"""
if not self.as_index:
raise NotImplementedError(
"as_index=False is not supported. Use .reset_index() instead."
)
if self.axis == 1:
raise NotImplementedError("axis=1 is not supported.")
data = self._obj_with_exclusions
df = data if data.ndim == 2 else data.to_frame()
starts, ends, sorted_index, sorted_data = self._numba_prep(df)
aggregator = executor.generate_shared_aggregator(
func, **get_jit_arguments(engine_kwargs)
)
result = aggregator(sorted_data, starts, ends, 0, *aggregator_args)
index = self.grouper.result_index
if data.ndim == 1:
result_kwargs = {"name": data.name}
result = result.ravel()
else:
result_kwargs = {"columns": data.columns}
return data._constructor(result, index=index, **result_kwargs)
def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs):
"""
Perform groupby transform routine with the numba engine.
This routine mimics the data splitting routine of the DataSplitter class
to generate the indices of each group in the sorted data and then passes the
data and indices into a Numba jitted function.
"""
data = self._obj_with_exclusions
df = data if data.ndim == 2 else data.to_frame()
starts, ends, sorted_index, sorted_data = self._numba_prep(df)
numba_.validate_udf(func)
numba_transform_func = numba_.generate_numba_transform_func(
func, **get_jit_arguments(engine_kwargs, kwargs)
)
result = numba_transform_func(
sorted_data,
sorted_index,
starts,
ends,
len(df.columns),
*args,
)
# result values needs to be resorted to their original positions since we
# evaluated the data sorted by group
result = result.take(np.argsort(sorted_index), axis=0)
index = data.index
if data.ndim == 1:
result_kwargs = {"name": data.name}
result = result.ravel()
else:
result_kwargs = {"columns": data.columns}
return data._constructor(result, index=index, **result_kwargs)
def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs):
"""
Perform groupby aggregation routine with the numba engine.
This routine mimics the data splitting routine of the DataSplitter class
to generate the indices of each group in the sorted data and then passes the
data and indices into a Numba jitted function.
"""
data = self._obj_with_exclusions
df = data if data.ndim == 2 else data.to_frame()
starts, ends, sorted_index, sorted_data = self._numba_prep(df)
numba_.validate_udf(func)
numba_agg_func = numba_.generate_numba_agg_func(
func, **get_jit_arguments(engine_kwargs, kwargs)
)
result = numba_agg_func(
sorted_data,
sorted_index,
starts,
ends,
len(df.columns),
*args,
)
index = self.grouper.result_index
if data.ndim == 1:
result_kwargs = {"name": data.name}
result = result.ravel()
else:
result_kwargs = {"columns": data.columns}
res = data._constructor(result, index=index, **result_kwargs)
if not self.as_index:
res = self._insert_inaxis_grouper(res)
res.index = default_index(len(res))
return res
# -----------------------------------------------------------------
# apply/agg/transform
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs) -> NDFrameT:
func = com.is_builtin_func(func)
if isinstance(func, str):
if hasattr(self, func):
res = getattr(self, func)
if callable(res):
return res(*args, **kwargs)
elif args or kwargs:
raise ValueError(f"Cannot pass arguments to property {func}")
return res
else:
raise TypeError(f"apply func should be callable, not '{func}'")
elif args or kwargs:
if callable(func):
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f, self._selected_obj)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
return self._python_apply_general(f, self._obj_with_exclusions)
return result
def _python_apply_general(
self,
f: Callable,
data: DataFrame | Series,
not_indexed_same: bool | None = None,
is_transform: bool = False,
is_agg: bool = False,
) -> NDFrameT:
"""
Apply function f in python space
Parameters
----------
f : callable
Function to apply
data : Series or DataFrame
Data to apply f to
not_indexed_same: bool, optional
When specified, overrides the value of not_indexed_same. Apply behaves
differently when the result index is equal to the input index, but
this can be coincidental leading to value-dependent behavior.
is_transform : bool, default False
Indicator for whether the function is actually a transform
and should not have group keys prepended.
is_agg : bool, default False
Indicator for whether the function is an aggregation. When the
result is empty, we don't want to warn for this case.
See _GroupBy._python_agg_general.
Returns
-------
Series or DataFrame
data after applying f
"""
values, mutated = self.grouper.apply(f, data, self.axis)
if not_indexed_same is None:
not_indexed_same = mutated
return self._wrap_applied_output(
data,
values,
not_indexed_same,
is_transform,
)
def _agg_general(
self,
numeric_only: bool = False,
min_count: int = -1,
*,
alias: str,
npfunc: Callable,
):
result = self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
return result.__finalize__(self.obj, method="groupby")
def _agg_py_fallback(
self, values: ArrayLike, ndim: int, alt: Callable
) -> ArrayLike:
"""
Fallback to pure-python aggregation if _cython_operation raises
NotImplementedError.
"""
# We get here with a) EADtypes and b) object dtype
assert alt is not None
if values.ndim == 1:
# For DataFrameGroupBy we only get here with ExtensionArray
ser = Series(values, copy=False)
else:
# We only get here with values.dtype == object
# TODO: special case not needed with ArrayManager
df = DataFrame(values.T)
# bc we split object blocks in grouped_reduce, we have only 1 col
# otherwise we'd have to worry about block-splitting GH#39329
assert df.shape[1] == 1
# Avoid call to self.values that can occur in DataFrame
# reductions; see GH#28949
ser = df.iloc[:, 0]
# We do not get here with UDFs, so we know that our dtype
# should always be preserved by the implemented aggregations
# TODO: Is this exactly right; see WrappedCythonOp get_result_dtype?
res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True)
if isinstance(values, Categorical):
# Because we only get here with known dtype-preserving
# reductions, we cast back to Categorical.
# TODO: if we ever get "rank" working, exclude it here.
res_values = type(values)._from_sequence(res_values, dtype=values.dtype)
elif ser.dtype == object:
res_values = res_values.astype(object, copy=False)
# If we are DataFrameGroupBy and went through a SeriesGroupByPath
# then we need to reshape
# GH#32223 includes case with IntegerArray values, ndarray res_values
# test_groupby_duplicate_columns with object dtype values
return ensure_block_shape(res_values, ndim=ndim)
def _cython_agg_general(
self,
how: str,
alt: Callable,
numeric_only: bool = False,
min_count: int = -1,
**kwargs,
):
# Note: we never get here with how="ohlc" for DataFrameGroupBy;
# that goes through SeriesGroupBy
data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how)
def array_func(values: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
"aggregate",
values,
how,
axis=data.ndim - 1,
min_count=min_count,
**kwargs,
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
# TODO: shouldn't min_count matter?
result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)
return result
new_mgr = data.grouped_reduce(array_func)
res = self._wrap_agged_manager(new_mgr)
out = self._wrap_aggregated_output(res)
if self.axis == 1:
out = out.infer_objects(copy=False)
return out
def _cython_transform(
self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs
):
raise AbstractMethodError(self)
def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
return self._transform_with_numba(
func, *args, engine_kwargs=engine_kwargs, **kwargs
)
# optimized transforms
func = com.get_cython_func(func) or func
if not isinstance(func, str):
return self._transform_general(func, *args, **kwargs)
elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels or func in base.transformation_kernels:
# cythonized transform or canned "agg+broadcast"
return getattr(self, func)(*args, **kwargs)
else:
# i.e. func in base.reduction_kernels
# GH#30918 Use _transform_fast only when we know func is an aggregation
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
# Temporarily set observed for dealing with categoricals.
with com.temp_setattr(self, "observed", True):
with com.temp_setattr(self, "as_index", True):
# GH#49834 - result needs groups in the index for
# _wrap_transform_fast_result
result = getattr(self, func)(*args, **kwargs)
return self._wrap_transform_fast_result(result)
def _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT:
"""
Fast transform path for aggregations.
"""
obj = self._obj_with_exclusions
# for each col, reshape to size of original frame by take operation
ids, _, _ = self.grouper.group_info
result = result.reindex(self.grouper.result_index, axis=self.axis, copy=False)
if self.obj.ndim == 1:
# i.e. SeriesGroupBy
out = algorithms.take_nd(result._values, ids)
output = obj._constructor(out, index=obj.index, name=obj.name)
else:
# `.size()` gives Series output on DataFrame input, need axis 0
axis = 0 if result.ndim == 1 else self.axis
# GH#46209
# Don't convert indices: negative indices need to give rise
# to null values in the result
output = result._take(ids, axis=axis, convert_indices=False)
output = output.set_axis(obj._get_axis(self.axis), axis=axis)
return output
# -----------------------------------------------------------------
# Utilities
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
def _cumcount_array(self, ascending: bool = True) -> np.ndarray:
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
if self.grouper.has_dropped_na:
out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False))
else:
out = out.astype(np.int64, copy=False)
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev]
# -----------------------------------------------------------------
def _obj_1d_constructor(self) -> Callable:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test: Literal["any", "all"], skipna: bool):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: ArrayLike) -> tuple[np.ndarray, type]:
if is_object_dtype(vals.dtype) and skipna:
# GH#37501: don't raise on pd.NA when skipna=True
mask = isna(vals)
if mask.any():
# mask on original values computed separately
vals = vals.copy()
vals[mask] = True
elif isinstance(vals, BaseMaskedArray):
vals = vals._data
vals = vals.astype(bool, copy=False)
return vals.view(np.int8), bool
def result_to_bool(
result: np.ndarray,
inference: type,
nullable: bool = False,
) -> ArrayLike:
if nullable:
return BooleanArray(result.astype(bool, copy=False), result == -1)
else:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
libgroupby.group_any_all,
numeric_only=False,
cython_dtype=np.dtype(np.int8),
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthy, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
Series or DataFrame
DataFrame or Series of boolean values, where a value is True if any element
is True within its respective group, False otherwise.
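Examples
--------
A small, illustrative example (the data below is arbitrary):
>>> df = pd.DataFrame({"A": ["a", "a", "b"], "B": [0, 1, 0]})
>>> df.groupby("A")["B"].any()
A
a     True
b    False
Name: B, dtype: bool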
"""
return self._bool_agg("any", skipna)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthy, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
Series or DataFrame
DataFrame or Series of boolean values, where a value is True if all elements
are True within its respective group, False otherwise.
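Examples
--------
A small, illustrative example (the data below is arbitrary):
>>> df = pd.DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 0]})
>>> df.groupby("A")["B"].all()
A
a     True
b    False
Name: B, dtype: bool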
"""
return self._bool_agg("all", skipna)
def count(self) -> NDFrameT:
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
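Examples
--------
For illustration, a small frame with one missing value (arbitrary data):
>>> df = pd.DataFrame({"A": [1, 1, 1, 2], "B": [np.nan, 2, 3, 4]})
>>> df.groupby("A")["B"].count()
A
1    2
2    1
Name: B, dtype: int64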
"""
data = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
is_series = data.ndim == 1
def hfunc(bvalues: ArrayLike) -> ArrayLike:
# TODO(EA2D): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
else:
masked = mask & ~isna(bvalues)
counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups)
if is_series:
assert counted.ndim == 2
assert counted.shape[0] == 1
return counted[0]
return counted
new_mgr = data.grouped_reduce(hfunc)
new_obj = self._wrap_agged_manager(new_mgr)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_aggregated_output() returns. GH 35028
# e.g. test_dataframe_groupby_on_2_categoricals_when_observed_is_false
with com.temp_setattr(self, "observed", True):
result = self._wrap_aggregated_output(new_obj)
return self._reindex_output(result, fill_value=0)
def mean(
self,
numeric_only: bool = False,
engine: str = "cython",
engine_kwargs: dict[str, bool] | None = None,
):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None`` and defaults to ``False``.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
.. versionadded:: 1.4.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}``
.. versionadded:: 1.4.0
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2.0
4.0 1.0
2 3.0 1.0
5.0 2.0
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
if maybe_use_numba(engine):
from pandas.core._numba.kernels import sliding_mean
return self._numba_agg_general(sliding_mean, engine_kwargs)
else:
result = self._cython_agg_general(
"mean",
alt=lambda x: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
return result.__finalize__(self.obj, method="groupby")
def median(self, numeric_only: bool = False):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None`` and defaults to False.
Returns
-------
Series or DataFrame
Median of values within each group.
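Examples
--------
A minimal example using an arbitrary Series grouped by its index:
>>> ser = pd.Series([1, 3, 5, 7], index=["a", "a", "b", "b"])
>>> ser.groupby(level=0).median()
a    2.0
b    6.0
dtype: float64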
"""
result = self._cython_agg_general(
"median",
alt=lambda x: Series(x).median(numeric_only=numeric_only),
numeric_only=numeric_only,
)
return result.__finalize__(self.obj, method="groupby")
def std(
self,
ddof: int = 1,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
numeric_only: bool = False,
):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
.. versionadded:: 1.4.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}``
.. versionadded:: 1.4.0
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
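Examples
--------
A minimal example using an arbitrary Series grouped by its index:
>>> ser = pd.Series([1, 3, 5, 9], index=["a", "a", "b", "b"])
>>> ser.groupby(level=0).std()
a    1.414214
b    2.828427
dtype: float64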
"""
if maybe_use_numba(engine):
from pandas.core._numba.kernels import sliding_var
return np.sqrt(self._numba_agg_general(sliding_var, engine_kwargs, ddof))
else:
def _preprocessing(values):
if isinstance(values, BaseMaskedArray):
return values._data, None
return values, None
def _postprocessing(
vals, inference, nullable: bool = False, result_mask=None
) -> ArrayLike:
if nullable:
if result_mask.ndim == 2:
result_mask = result_mask[:, 0]
return FloatingArray(np.sqrt(vals), result_mask.view(np.bool_))
return np.sqrt(vals)
result = self._get_cythonized_result(
libgroupby.group_var,
cython_dtype=np.dtype(np.float64),
numeric_only=numeric_only,
needs_counts=True,
pre_processing=_preprocessing,
post_processing=_postprocessing,
ddof=ddof,
how="std",
)
return result
def var(
self,
ddof: int = 1,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
numeric_only: bool = False,
):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
.. versionadded:: 1.4.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}``
.. versionadded:: 1.4.0
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
Series or DataFrame
Variance of values within each group.
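Examples
--------
A minimal example using an arbitrary Series grouped by its index:
>>> ser = pd.Series([1, 3, 5, 9], index=["a", "a", "b", "b"])
>>> ser.groupby(level=0).var()
a    2.0
b    8.0
dtype: float64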
"""
if maybe_use_numba(engine):
from pandas.core._numba.kernels import sliding_var
return self._numba_agg_general(sliding_var, engine_kwargs, ddof)
else:
return self._cython_agg_general(
"var",
alt=lambda x: Series(x).var(ddof=ddof),
numeric_only=numeric_only,
ddof=ddof,
)
def _value_counts(
self,
subset: Sequence[Hashable] | None = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
dropna: bool = True,
) -> DataFrame | Series:
"""
Shared implementation of value_counts for SeriesGroupBy and DataFrameGroupBy.
SeriesGroupBy additionally supports a bins argument. See the docstring of
DataFrameGroupBy.value_counts for a description of arguments.
"""
if self.axis == 1:
raise NotImplementedError(
"DataFrameGroupBy.value_counts only handles axis=0"
)
name = "proportion" if normalize else "count"
df = self.obj
obj = self._obj_with_exclusions
in_axis_names = {
grouping.name for grouping in self.grouper.groupings if grouping.in_axis
}
if isinstance(obj, Series):
_name = obj.name
keys = [] if _name in in_axis_names else [obj]
else:
unique_cols = set(obj.columns)
if subset is not None:
subsetted = set(subset)
clashing = subsetted & set(in_axis_names)
if clashing:
raise ValueError(
f"Keys {clashing} in subset cannot be in "
"the groupby column keys."
)
doesnt_exist = subsetted - unique_cols
if doesnt_exist:
raise ValueError(
f"Keys {doesnt_exist} in subset do not "
f"exist in the DataFrame."
)
else:
subsetted = unique_cols
keys = [
# Can't use .values because the column label needs to be preserved
obj.iloc[:, idx]
for idx, _name in enumerate(obj.columns)
if _name not in in_axis_names and _name in subsetted
]
groupings = list(self.grouper.groupings)
for key in keys:
grouper, _, _ = get_grouper(
df,
key=key,
axis=self.axis,
sort=self.sort,
observed=False,
dropna=dropna,
)
groupings += list(grouper.groupings)
# Take the size of the overall columns
gb = df.groupby(
groupings,
sort=self.sort,
observed=self.observed,
dropna=self.dropna,
)
result_series = cast(Series, gb.size())
result_series.name = name
# GH-46357 Include non-observed categories
# of non-grouping columns regardless of `observed`
if any(
isinstance(grouping.grouping_vector, (Categorical, CategoricalIndex))
and not grouping._observed
for grouping in groupings
):
levels_list = [ping.result_index for ping in groupings]
multi_index, _ = MultiIndex.from_product(
levels_list, names=[ping.name for ping in groupings]
).sortlevel()
result_series = result_series.reindex(multi_index, fill_value=0)
if normalize:
# Normalize the results by dividing by the original group sizes.
# We are guaranteed to have the first N levels be the
# user-requested grouping.
levels = list(
range(len(self.grouper.groupings), result_series.index.nlevels)
)
indexed_group_size = result_series.groupby(
result_series.index.droplevel(levels),
sort=self.sort,
dropna=self.dropna,
).transform("sum")
result_series /= indexed_group_size
# Handle groups of non-observed categories
result_series = result_series.fillna(0.0)
if sort:
# Sort the values and then resort by the main grouping
index_level = range(len(self.grouper.groupings))
result_series = result_series.sort_values(ascending=ascending).sort_index(
level=index_level, sort_remaining=False
)
result: Series | DataFrame
if self.as_index:
result = result_series
else:
# Convert to frame
index = result_series.index
columns = com.fill_missing_names(index.names)
if name in columns:
raise ValueError(f"Column label '{name}' is duplicate of result column")
result_series.name = name
result_series.index = index.set_names(range(len(columns)))
result_frame = result_series.reset_index()
result_frame.columns = columns + [name]
result = result_frame
return result.__finalize__(self.obj, method="value_counts")
def sem(self, ddof: int = 1, numeric_only: bool = False):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
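Examples
--------
A minimal example using an arbitrary Series grouped by its index; the
standard error is the group standard deviation divided by the square root
of the group count:
>>> ser = pd.Series([1, 3, 5, 9], index=["a", "a", "b", "b"])
>>> ser.groupby(level=0).sem()
a    1.0
b    2.0
dtype: float64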
"""
if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype):
raise TypeError(
f"{type(self).__name__}.sem called with "
f"numeric_only={numeric_only} and dtype {self.obj.dtype}"
)
result = self.std(ddof=ddof, numeric_only=numeric_only)
if result.ndim == 1:
result /= np.sqrt(self.count())
else:
cols = result.columns.difference(self.exclusions).unique()
counts = self.count()
result_ilocs = result.columns.get_indexer_for(cols)
count_ilocs = counts.columns.get_indexer_for(cols)
result.iloc[:, result_ilocs] /= np.sqrt(counts.iloc[:, count_ilocs])
return result
def size(self) -> DataFrame | Series:
"""
Compute group sizes.
Returns
-------
DataFrame or Series
Number of rows in each group as a Series if as_index is True
or a DataFrame if as_index is False.
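Examples
--------
A small, illustrative example (the data below is arbitrary):
>>> df = pd.DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3]})
>>> df.groupby("A").size()
A
a    2
b    1
dtype: int64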
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if isinstance(self.obj, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
with com.temp_setattr(self, "as_index", True):
# size already has the desired behavior in GH#49519, but this makes the
# as_index=False path of _reindex_output fail on categorical groupers.
result = self._reindex_output(result, fill_value=0)
if not self.as_index:
# error: Incompatible types in assignment (expression has
# type "DataFrame", variable has type "Series")
result = result.rename("size").reset_index() # type: ignore[assignment]
return result
def sum(
self,
numeric_only: bool = False,
min_count: int = 0,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
if maybe_use_numba(engine):
from pandas.core._numba.kernels import sliding_sum
return self._numba_agg_general(
sliding_sum,
engine_kwargs,
)
else:
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _agg_general() returns. GH #31422
with com.temp_setattr(self, "observed", True):
result = self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="sum",
npfunc=np.sum,
)
return self._reindex_output(result, fill_value=0)
def prod(self, numeric_only: bool = False, min_count: int = 0):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod
)
def min(
self,
numeric_only: bool = False,
min_count: int = -1,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
if maybe_use_numba(engine):
from pandas.core._numba.kernels import sliding_min_max
return self._numba_agg_general(sliding_min_max, engine_kwargs, False)
else:
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="min",
npfunc=np.min,
)
def max(
self,
numeric_only: bool = False,
min_count: int = -1,
engine: str | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
if maybe_use_numba(engine):
from pandas.core._numba.kernels import sliding_min_max
return self._numba_agg_general(sliding_min_max, engine_kwargs, True)
else:
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="max",
npfunc=np.max,
)
def first(self, numeric_only: bool = False, min_count: int = -1):
"""
Compute the first non-null entry of each column.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
min_count : int, default -1
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
First non-null of values within each group.
See Also
--------
DataFrame.groupby : Apply a function groupby to each row or column of a
DataFrame.
pandas.core.groupby.DataFrameGroupBy.last : Compute the last non-null entry
of each column.
pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group.
Examples
--------
>>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[None, 5, 6], C=[1, 2, 3],
... D=['3/11/2000', '3/12/2000', '3/13/2000']))
>>> df['D'] = pd.to_datetime(df['D'])
>>> df.groupby("A").first()
B C D
A
1 5.0 1 2000-03-11
3 6.0 3 2000-03-13
>>> df.groupby("A").first(min_count=2)
B C D
A
1 NaN 1.0 2000-03-11
3 NaN NaN NaT
>>> df.groupby("A").first(numeric_only=True)
B C
A
1 5.0 1
3 6.0 3
"""
def first_compat(obj: NDFrameT, axis: AxisInt = 0):
def first(x: Series):
"""Helper function for first item that isn't NA."""
arr = x.array[notna(x.array)]
if not len(arr):
return np.nan
return arr[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else: # pragma: no cover
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="first",
npfunc=first_compat,
)
def last(self, numeric_only: bool = False, min_count: int = -1):
"""
Compute the last non-null entry of each column.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
min_count : int, default -1
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Last non-null of values within each group.
See Also
--------
DataFrame.groupby : Apply a function groupby to each row or column of a
DataFrame.
pandas.core.groupby.DataFrameGroupBy.first : Compute the first non-null entry
of each column.
pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group.
Examples
--------
>>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3]))
>>> df.groupby("A").last()
B C
A
1 5.0 2
3 6.0 3
"""
def last_compat(obj: NDFrameT, axis: AxisInt = 0):
def last(x: Series):
"""Helper function for last item that isn't NA."""
arr = x.array[notna(x.array)]
if not len(arr):
return np.nan
return arr[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else: # pragma: no cover
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="last",
npfunc=last_compat,
)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
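Examples
--------
A minimal example using an arbitrary Series grouped by its index:
>>> ser = pd.Series([1.0, 3.0, 2.0, 4.0, 3.0, 5.0],
...                 index=["a", "a", "a", "b", "b", "b"])
>>> ser.groupby(level=0).ohlc()
   open  high  low  close
a   1.0   3.0  1.0    2.0
b   4.0   5.0  3.0    5.0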
"""
if self.obj.ndim == 1:
# self._iterate_slices() yields only self._selected_obj
obj = self._selected_obj
is_numeric = is_numeric_dtype(obj.dtype)
if not is_numeric:
raise DataError("No numeric types to aggregate")
res_values = self.grouper._cython_operation(
"aggregate", obj._values, "ohlc", axis=0, min_count=-1
)
agg_names = ["open", "high", "low", "close"]
result = self.obj._constructor_expanddim(
res_values, index=self.grouper.result_index, columns=agg_names
)
return self._reindex_output(result)
result = self._apply_to_column_groupbys(
lambda x: x.ohlc(), self._obj_with_exclusions
)
if not self.as_index:
result = self._insert_inaxis_grouper(result)
result.index = default_index(len(result))
return result
def describe(
self,
percentiles=None,
include=None,
exclude=None,
) -> NDFrameT:
obj = self._obj_with_exclusions
if len(obj) == 0:
described = obj.describe(
percentiles=percentiles, include=include, exclude=exclude
)
if obj.ndim == 1:
result = described
else:
result = described.unstack()
return result.to_frame().T.iloc[:0]
with com.temp_setattr(self, "as_index", True):
result = self._python_apply_general(
lambda x: x.describe(
percentiles=percentiles, include=include, exclude=exclude
),
obj,
not_indexed_same=True,
)
if self.axis == 1:
return result.T
# GH#49256 - properly handle the grouping column(s)
result = result.unstack()
if not self.as_index:
result = self._insert_inaxis_grouper(result)
result.index = default_index(len(result))
return result
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper.
Given a grouper, the function resamples it according to a frequency
string ("string" -> "frequency").
See the :ref:`frequency aliases <timeseries.offset_aliases>`
documentation for more details.
Parameters
----------
rule : str or DateOffset
The offset string or object representing target grouper conversion.
*args, **kwargs
Possible arguments are `how`, `fill_method`, `limit`, `kind` and
`on`, and other arguments of `TimeGrouper`.
Returns
-------
Grouper
Return a new grouper with our resampler appended.
See Also
--------
Grouper : Specify a frequency to resample with when
grouping by a key.
DatetimeIndex.resample : Frequency conversion and resampling of
time series.
Examples
--------
>>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
>>> df = pd.DataFrame(data=4 * [range(2)],
... index=idx,
... columns=['a', 'b'])
>>> df.iloc[2, 0] = 5
>>> df
a b
2000-01-01 00:00:00 0 1
2000-01-01 00:01:00 0 1
2000-01-01 00:02:00 5 1
2000-01-01 00:03:00 0 1
Downsample the DataFrame into 3 minute bins and sum the values of
the timestamps falling into a bin.
>>> df.groupby('a').resample('3T').sum()
a b
a
0 2000-01-01 00:00:00 0 2
2000-01-01 00:03:00 0 1
5 2000-01-01 00:00:00 5 1
Upsample the series into 30 second bins.
>>> df.groupby('a').resample('30S').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:00:30 0 0
2000-01-01 00:01:00 0 1
2000-01-01 00:01:30 0 0
2000-01-01 00:02:00 0 0
2000-01-01 00:02:30 0 0
2000-01-01 00:03:00 0 1
5 2000-01-01 00:02:00 5 1
Resample by month. Values are assigned to the month of the period.
>>> df.groupby('a').resample('M').sum()
a b
a
0 2000-01-31 0 3
5 2000-01-31 5 1
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> df.groupby('a').resample('3T', closed='right').sum()
a b
a
0 1999-12-31 23:57:00 0 1
2000-01-01 00:00:00 0 2
5 2000-01-01 00:00:00 5 1
Downsample the series into 3 minute bins and close the right side of
the bin interval, but label each bin using the right edge instead of
the left.
>>> df.groupby('a').resample('3T', closed='right', label='right').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:03:00 0 2
5 2000-01-01 00:03:00 5 1
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
def rolling(self, *args, **kwargs) -> RollingGroupby:
"""
Return a rolling grouper, providing rolling functionality per group.
Parameters
----------
window : int, timedelta, str, offset, or BaseIndexer subclass
Size of the moving window.
If an integer, the fixed number of observations used for
each window.
If a timedelta, str, or offset, the time period of each window. Each
window will be of variable size, based on the observations included in
the time-period. This is only valid for datetimelike indexes.
To learn more about the offsets & frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
If a BaseIndexer subclass, the window boundaries are
based on the defined ``get_window_bounds`` method. Additional rolling
keyword arguments, namely ``min_periods``, ``center``, ``closed`` and
``step`` will be passed to ``get_window_bounds``.
min_periods : int, default None
Minimum number of observations in window required to have a value;
otherwise, result is ``np.nan``.
For a window that is specified by an offset,
``min_periods`` will default to 1.
For a window that is specified by an integer, ``min_periods`` will default
to the size of the window.
center : bool, default False
If False, set the window labels as the right edge of the window index.
If True, set the window labels as the center of the window index.
win_type : str, default None
If ``None``, all points are evenly weighted.
If a string, it must be a valid `scipy.signal window function
<https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
Certain Scipy window types require additional parameters to be passed
in the aggregation function. The additional parameters must match
the keywords specified in the Scipy window type method signature.
on : str, optional
For a DataFrame, a column label or Index level on which
to calculate the rolling window, rather than the DataFrame's index.
Provided integer column is ignored and excluded from result since
an integer index is not used to calculate the rolling window.
axis : int or str, default 0
If ``0`` or ``'index'``, roll across the rows.
If ``1`` or ``'columns'``, roll across the columns.
For `Series` this parameter is unused and defaults to 0.
closed : str, default None
If ``'right'``, the first point in the window is excluded from calculations.
If ``'left'``, the last point in the window is excluded from calculations.
If ``'both'``, no points in the window are excluded from calculations.
If ``'neither'``, the first and last points in the window are excluded
from calculations.
Default ``None`` (``'right'``).
method : str {'single', 'table'}, default 'single'
Execute the rolling operation per single column or row (``'single'``)
or over the entire object (``'table'``).
This argument is only implemented when specifying ``engine='numba'``
in the method call.
Returns
-------
RollingGroupby
Return a new grouper with our rolling appended.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.groupby : Apply a function groupby to a Series.
DataFrame.groupby : Apply a function groupby.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': [0.362, 0.227, 1.267, -0.562]})
>>> df
A B C
0 1 1 0.362
1 1 2 0.227
2 2 3 1.267
3 2 4 -0.562
>>> df.groupby('A').rolling(2).sum()
B C
A
1 0 NaN NaN
1 3.0 0.589
2 2 NaN NaN
3 7.0 0.705
>>> df.groupby('A').rolling(2, min_periods=1).sum()
B C
A
1 0 1.0 0.362
1 3.0 0.589
2 2 3.0 1.267
3 7.0 0.705
>>> df.groupby('A').rolling(2, on='B').sum()
B C
A
1 0 1 NaN
1 2 0.589
2 2 3 NaN
3 4 0.705
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(
self._selected_obj,
*args,
_grouper=self.grouper,
_as_index=self.as_index,
**kwargs,
)
def expanding(self, *args, **kwargs) -> ExpandingGroupby:
"""
Return an expanding grouper, providing expanding
functionality per group.
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(
self._selected_obj,
*args,
_grouper=self.grouper,
**kwargs,
)
def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby:
"""
Return an ewm grouper, providing ewm functionality per group.
"""
from pandas.core.window import ExponentialMovingWindowGroupby
return ExponentialMovingWindowGroupby(
self._selected_obj,
*args,
_grouper=self.grouper,
**kwargs,
)
def _fill(self, direction: Literal["ffill", "bfill"], limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad : Forward fill the missing values in the dataset.
backfill : Backward fill the missing values in the dataset.
"""
# Need int value for Cython
if limit is None:
limit = -1
ids, _, _ = self.grouper.group_info
sorted_labels = np.argsort(ids, kind="mergesort").astype(np.intp, copy=False)
if direction == "bfill":
sorted_labels = sorted_labels[::-1]
col_func = partial(
libgroupby.group_fillna_indexer,
labels=ids,
sorted_labels=sorted_labels,
direction=direction,
limit=limit,
dropna=self.dropna,
)
def blk_func(values: ArrayLike) -> ArrayLike:
mask = isna(values)
if values.ndim == 1:
indexer = np.empty(values.shape, dtype=np.intp)
col_func(out=indexer, mask=mask)
return algorithms.take_nd(values, indexer)
else:
# We broadcast algorithms.take_nd analogous to
# np.take_along_axis
# Note: we only get here with backfill/pad,
# so if we have a dtype that cannot hold NAs,
# then there will be no -1s in indexer, so we can use
# the original dtype (no need to ensure_dtype_can_hold_na)
if isinstance(values, np.ndarray):
dtype = values.dtype
if self.grouper.has_dropped_na:
# dropped null groups give rise to nan in the result
dtype = ensure_dtype_can_hold_na(values.dtype)
out = np.empty(values.shape, dtype=dtype)
else:
out = type(values)._empty(values.shape, dtype=values.dtype)
for i, value_element in enumerate(values):
# call group_fillna_indexer column-wise
indexer = np.empty(values.shape[1], dtype=np.intp)
col_func(out=indexer, mask=mask[i])
out[i, :] = algorithms.take_nd(value_element, indexer)
return out
mgr = self._get_data_to_aggregate()
res_mgr = mgr.apply(blk_func)
new_obj = self._wrap_agged_manager(res_mgr)
if self.axis == 1:
# Only relevant for DataFrameGroupBy
new_obj = new_obj.T
new_obj.columns = self.obj.columns
new_obj.index = self.obj.index
return new_obj
def ffill(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.ffill: Forward fill the missing values in the dataset.
DataFrame.ffill: Object with missing values filled or None if inplace=True.
Series.fillna: Fill NaN values of a Series.
DataFrame.fillna: Fill NaN values of a DataFrame.
"""
return self._fill("ffill", limit=limit)
def bfill(self, limit=None):
"""
Backward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.bfill : Backward fill the missing values in the dataset.
DataFrame.bfill: Backward fill the missing values in the dataset.
Series.fillna: Fill NaN values of a Series.
DataFrame.fillna: Fill NaN values of a DataFrame.
"""
return self._fill("bfill", limit=limit)
def nth(self) -> GroupByNthSelector:
"""
Take the nth row from each group if n is an int, otherwise a subset of rows.
Can be either a call or an index. dropna is not available with index notation.
Index notation accepts a comma separated list of integers and slices.
If dropna, will take the nth non-null row, dropna is either
'all' or 'any'; this is equivalent to calling dropna(how=dropna)
before the groupby.
Parameters
----------
n : int, slice or list of ints and slices
A single nth value for the row or a list of nth values or slices.
.. versionchanged:: 1.4.0
Added slice and lists containing slices.
Added index notation.
dropna : {'any', 'all', None}, default None
Apply the specified dropna operation before counting which row is
the nth row. Only supported if n is an int.
Returns
-------
Series or DataFrame
N-th value within each group.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 2 3.0
>>> g.nth(1)
A B
1 1 2.0
4 2 5.0
>>> g.nth(-1)
A B
3 1 4.0
4 2 5.0
>>> g.nth([0, 1])
A B
0 1 NaN
1 1 2.0
2 2 3.0
4 2 5.0
>>> g.nth(slice(None, -1))
A B
0 1 NaN
1 1 2.0
2 2 3.0
Index notation may also be used
>>> g.nth[0, 1]
A B
0 1 NaN
1 1 2.0
2 2 3.0
4 2 5.0
>>> g.nth[:-1]
A B
0 1 NaN
1 1 2.0
2 2 3.0
Specifying `dropna` allows ignoring ``NaN`` values
>>> g.nth(0, dropna='any')
A B
1 1 2.0
2 2 3.0
When the specified ``n`` is larger than any of the groups, an
empty DataFrame is returned
>>> g.nth(3, dropna='any')
Empty DataFrame
Columns: [A, B]
Index: []
"""
return GroupByNthSelector(self)
def _nth(
self,
n: PositionalIndexer | tuple,
dropna: Literal["any", "all", None] = None,
) -> NDFrameT:
if not dropna:
mask = self._make_mask_from_positional_indexer(n)
ids, _, _ = self.grouper.group_info
# Drop NA values in grouping
mask = mask & (ids != -1)
out = self._mask_selected_obj(mask)
return out
# dropna is truthy
if not is_integer(n):
raise ValueError("dropna option only supported for an integer argument")
if dropna not in ["any", "all"]:
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError(
"For a DataFrame or Series groupby.nth, dropna must be "
"either None, 'any' or 'all', "
f"(was passed {dropna})."
)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
n = cast(int, n)
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = self.grouper.codes_info[axis.isin(dropped.index)]
if self.grouper.has_dropped_na:
# Null groups need to still be encoded as -1 when passed to groupby
nulls = grouper == -1
# error: No overload variant of "where" matches argument types
# "Any", "NAType", "Any"
values = np.where(nulls, NA, grouper) # type: ignore[call-overload]
grouper = Index(values, dtype="Int64")
else:
# create a grouper with the original parameters, but on dropped
# object
grouper, _, _ = get_grouper(
dropped,
key=self.keys,
axis=self.axis,
level=self.level,
sort=self.sort,
)
grb = dropped.groupby(
grouper, as_index=self.as_index, sort=self.sort, axis=self.axis
)
return grb.nth(n)
def quantile(
self,
q: float | AnyArrayLike = 0.5,
interpolation: str = "linear",
numeric_only: bool = False,
):
"""
Return group values at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value(s) between 0 and 1 providing the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Method to use when the desired quantile falls between two points.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
Series or DataFrame
Return type determined by caller of GroupBy object.
See Also
--------
Series.quantile : Similar method for Series.
DataFrame.quantile : Similar method for DataFrame.
numpy.percentile : NumPy method to compute qth percentile.
Examples
--------
>>> df = pd.DataFrame([
... ['a', 1], ['a', 2], ['a', 3],
... ['b', 1], ['b', 3], ['b', 5]
... ], columns=['key', 'val'])
>>> df.groupby('key').quantile()
val
key
a 2.0
b 3.0
"""
def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]:
if is_object_dtype(vals):
raise TypeError(
"'quantile' cannot be performed against 'object' dtypes!"
)
inference: DtypeObj | None = None
if isinstance(vals, BaseMaskedArray) and is_numeric_dtype(vals.dtype):
out = vals.to_numpy(dtype=float, na_value=np.nan)
inference = vals.dtype
elif is_integer_dtype(vals.dtype):
if isinstance(vals, ExtensionArray):
out = vals.to_numpy(dtype=float, na_value=np.nan)
else:
out = vals
inference = np.dtype(np.int64)
elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray):
out = vals.to_numpy(dtype=float, na_value=np.nan)
elif needs_i8_conversion(vals.dtype):
inference = vals.dtype
# In this case we need to delay the casting until after the
# np.lexsort below.
# error: Incompatible return value type (got
# "Tuple[Union[ExtensionArray, ndarray[Any, Any]], Union[Any,
# ExtensionDtype]]", expected "Tuple[ndarray[Any, Any],
# Optional[Union[dtype[Any], ExtensionDtype]]]")
return vals, inference # type: ignore[return-value]
elif isinstance(vals, ExtensionArray) and is_float_dtype(vals):
inference = np.dtype(np.float64)
out = vals.to_numpy(dtype=float, na_value=np.nan)
else:
out = np.asarray(vals)
return out, inference
def post_processor(
vals: np.ndarray,
inference: DtypeObj | None,
result_mask: np.ndarray | None,
orig_vals: ArrayLike,
) -> ArrayLike:
if inference:
# Check for edge case
if isinstance(orig_vals, BaseMaskedArray):
assert result_mask is not None # for mypy
if interpolation in {"linear", "midpoint"} and not is_float_dtype(
orig_vals
):
return FloatingArray(vals, result_mask)
else:
# Item "ExtensionDtype" of "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]" has no attribute "numpy_dtype"
# [union-attr]
return type(orig_vals)(
vals.astype(
inference.numpy_dtype # type: ignore[union-attr]
),
result_mask,
)
elif not (
is_integer_dtype(inference)
and interpolation in {"linear", "midpoint"}
):
if needs_i8_conversion(inference):
# error: Item "ExtensionArray" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_ndarray"
vals = vals.astype("i8").view(
orig_vals._ndarray.dtype # type: ignore[union-attr]
)
# error: Item "ExtensionArray" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_from_backing_data"
return orig_vals._from_backing_data( # type: ignore[union-attr]
vals
)
assert isinstance(inference, np.dtype) # for mypy
return vals.astype(inference)
return vals
orig_scalar = is_scalar(q)
if orig_scalar:
# error: Incompatible types in assignment (expression has type "List[
# Union[float, ExtensionArray, ndarray[Any, Any], Index, Series]]",
# variable has type "Union[float, Union[Union[ExtensionArray, ndarray[
# Any, Any]], Index, Series]]")
q = [q] # type: ignore[assignment]
qs = np.array(q, dtype=np.float64)
ids, _, ngroups = self.grouper.group_info
nqs = len(qs)
func = partial(
libgroupby.group_quantile, labels=ids, qs=qs, interpolation=interpolation
)
# Put '-1' (NaN) labels as the last group so it does not interfere
# with the calculations. Note: length check avoids failure on empty
# labels. In that case, the value doesn't matter
na_label_for_sorting = ids.max() + 1 if len(ids) > 0 else 0
labels_for_lexsort = np.where(ids == -1, na_label_for_sorting, ids)
def blk_func(values: ArrayLike) -> ArrayLike:
orig_vals = values
if isinstance(values, BaseMaskedArray):
mask = values._mask
result_mask = np.zeros((ngroups, nqs), dtype=np.bool_)
else:
mask = isna(values)
result_mask = None
is_datetimelike = needs_i8_conversion(values.dtype)
vals, inference = pre_processor(values)
ncols = 1
if vals.ndim == 2:
ncols = vals.shape[0]
shaped_labels = np.broadcast_to(
labels_for_lexsort, (ncols, len(labels_for_lexsort))
)
else:
shaped_labels = labels_for_lexsort
out = np.empty((ncols, ngroups, nqs), dtype=np.float64)
# Get an index of values sorted by values and then labels
order = (vals, shaped_labels)
sort_arr = np.lexsort(order).astype(np.intp, copy=False)
if is_datetimelike:
# This casting needs to happen after the lexsort in order
# to ensure that NaTs are placed at the end and not the front
vals = vals.view("i8").astype(np.float64)
if vals.ndim == 1:
# EA (ExtensionArray) is always 1d
func(
out[0],
values=vals,
mask=mask,
sort_indexer=sort_arr,
result_mask=result_mask,
)
else:
for i in range(ncols):
func(out[i], values=vals[i], mask=mask[i], sort_indexer=sort_arr[i])
if vals.ndim == 1:
out = out.ravel("K")
if result_mask is not None:
result_mask = result_mask.ravel("K")
else:
out = out.reshape(ncols, ngroups * nqs)
return post_processor(out, inference, result_mask, orig_vals)
data = self._get_data_to_aggregate(numeric_only=numeric_only, name="quantile")
res_mgr = data.grouped_reduce(blk_func)
res = self._wrap_agged_manager(res_mgr)
if orig_scalar:
# Avoid expensive MultiIndex construction
return self._wrap_aggregated_output(res)
return self._wrap_aggregated_output(res, qs=qs)
def ngroup(self, ascending: bool = True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN`
and will be skipped from the count.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]})
>>> df
color
0 red
1 None
2 red
3 blue
4 blue
5 red
>>> df.groupby("color").ngroup()
0 1.0
1 NaN
2 1.0
3 0.0
4 0.0
5 1.0
dtype: float64
>>> df.groupby("color", dropna=False).ngroup()
0 1
1 2
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby("color", dropna=False).ngroup(ascending=False)
0 1
1 0
2 1
3 2
4 2
5 1
dtype: int64
"""
obj = self._obj_with_exclusions
index = obj._get_axis(self.axis)
comp_ids = self.grouper.group_info[0]
dtype: type
if self.grouper.has_dropped_na:
comp_ids = np.where(comp_ids == -1, np.nan, comp_ids)
dtype = np.float64
else:
dtype = np.int64
if any(ping._passed_categorical for ping in self.grouper.groupings):
# comp_ids reflect non-observed groups, we need only observed
comp_ids = rank_1d(comp_ids, ties_method="dense") - 1
result = self._obj_1d_constructor(comp_ids, index, dtype=dtype)
if not ascending:
result = self.ngroups - 1 - result
return result
def cumcount(self, ascending: bool = True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
See Also
--------
.ngroup : Number the groups themselves.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
index = self._obj_with_exclusions._get_axis(self.axis)
cumcounts = self._cumcount_array(ascending=ascending)
return self._obj_1d_constructor(cumcounts, index)
def rank(
self,
method: str = "average",
ascending: bool = True,
na_option: str = "keep",
pct: bool = False,
axis: AxisInt = 0,
) -> NDFrameT:
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group.
* min: lowest rank in group.
* max: highest rank in group.
* first: ranks assigned in order they appear in the array.
* dense: like 'min', but rank always increases by 1 between groups.
ascending : bool, default True
False for ranks by high (1) to low (N).
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are.
* top: smallest rank if ascending.
* bottom: smallest rank if descending.
pct : bool, default False
Compute percentage rank of data within each group.
axis : int, default 0
The axis of the object over which to compute the rank.
Returns
-------
DataFrame with ranking of values within each group
%(see_also)s
Examples
--------
>>> df = pd.DataFrame(
... {
... "group": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"],
... "value": [2, 4, 2, 3, 5, 1, 2, 4, 1, 5],
... }
... )
>>> df
group value
0 a 2
1 a 4
2 a 2
3 a 3
4 a 5
5 b 1
6 b 2
7 b 4
8 b 1
9 b 5
>>> for method in ['average', 'min', 'max', 'dense', 'first']:
... df[f'{method}_rank'] = df.groupby('group')['value'].rank(method)
>>> df
group value average_rank min_rank max_rank dense_rank first_rank
0 a 2 1.5 1.0 2.0 1.0 1.0
1 a 4 4.0 4.0 4.0 3.0 4.0
2 a 2 1.5 1.0 2.0 1.0 2.0
3 a 3 3.0 3.0 3.0 2.0 3.0
4 a 5 5.0 5.0 5.0 4.0 5.0
5 b 1 1.5 1.0 2.0 1.0 1.0
6 b 2 3.0 3.0 3.0 2.0 3.0
7 b 4 4.0 4.0 4.0 3.0 4.0
8 b 1 1.5 1.0 2.0 1.0 2.0
9 b 5 5.0 5.0 5.0 4.0 5.0
"""
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
kwargs = {
"ties_method": method,
"ascending": ascending,
"na_option": na_option,
"pct": pct,
}
if axis != 0:
# DataFrame uses different keyword name
kwargs["method"] = kwargs.pop("ties_method")
f = lambda x: x.rank(axis=axis, numeric_only=False, **kwargs)
result = self._python_apply_general(
f, self._selected_obj, is_transform=True
)
return result
return self._cython_transform(
"rank",
numeric_only=False,
axis=axis,
**kwargs,
)
def cumprod(self, axis: Axis = 0, *args, **kwargs) -> NDFrameT:
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
f = lambda x: x.cumprod(axis=axis, **kwargs)
return self._python_apply_general(f, self._selected_obj, is_transform=True)
return self._cython_transform("cumprod", **kwargs)
def cumsum(self, axis: Axis = 0, *args, **kwargs) -> NDFrameT:
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
f = lambda x: x.cumsum(axis=axis, **kwargs)
return self._python_apply_general(f, self._selected_obj, is_transform=True)
return self._cython_transform("cumsum", **kwargs)
def cummin(
self, axis: AxisInt = 0, numeric_only: bool = False, **kwargs
) -> NDFrameT:
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
"""
skipna = kwargs.get("skipna", True)
if axis != 0:
f = lambda x: np.minimum.accumulate(x, axis)
obj = self._selected_obj
if numeric_only:
obj = obj._get_numeric_data()
return self._python_apply_general(f, obj, is_transform=True)
return self._cython_transform(
"cummin", numeric_only=numeric_only, skipna=skipna
)
def cummax(
self, axis: AxisInt = 0, numeric_only: bool = False, **kwargs
) -> NDFrameT:
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
"""
skipna = kwargs.get("skipna", True)
if axis != 0:
f = lambda x: np.maximum.accumulate(x, axis)
obj = self._selected_obj
if numeric_only:
obj = obj._get_numeric_data()
return self._python_apply_general(f, obj, is_transform=True)
return self._cython_transform(
"cummax", numeric_only=numeric_only, skipna=skipna
)
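# Editor's note: an illustrative sketch (not part of the original source) of the
# cumulative transforms; `df` is hypothetical.
# >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1, 2, 3, 4]})
# >>> df.groupby("key")["val"].cumsum()
# 0    1
# 1    3
# 2    3
# 3    7
# Name: val, dtype: int64
# Each accumulation (cumsum, cumprod, cummin, cummax) restarts at the first row of
# every group.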
def _get_cythonized_result(
self,
base_func: Callable,
cython_dtype: np.dtype,
numeric_only: bool = False,
needs_counts: bool = False,
pre_processing=None,
post_processing=None,
how: str = "any_all",
**kwargs,
):
"""
Get result for Cythonized functions.
Parameters
----------
base_func : callable, Cythonized function to be called
cython_dtype : np.dtype
Type of the array that will be modified by the Cython call.
numeric_only : bool, default False
Whether only numeric datatypes should be computed
needs_counts : bool, default False
Whether the counts should be a part of the Cython call
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython.
Function should return a tuple where the first element is the
values to be passed to Cython and the second element is an optional
type which the values should be converted to after being returned
by the Cython operation. This function is also responsible for
raising a TypeError if the values have an invalid type. Raises
if `needs_values` is False.
post_processing : function, default None
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
(ndarray, Type). If `needs_nullable=True`, a third argument should be
`nullable`, to allow for processing specific to nullable values.
how : str, default any_all
Determines if any/all cython interface or std interface is used.
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if post_processing and not callable(post_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing and not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
grouper = self.grouper
ids, _, ngroups = grouper.group_info
base_func = partial(base_func, labels=ids)
def blk_func(values: ArrayLike) -> ArrayLike:
values = values.T
ncols = 1 if values.ndim == 1 else values.shape[1]
result: ArrayLike
result = np.zeros(ngroups * ncols, dtype=cython_dtype)
result = result.reshape((ngroups, ncols))
func = partial(base_func, out=result)
inferences = None
if needs_counts:
counts = np.zeros(ngroups, dtype=np.int64)
func = partial(func, counts=counts)
is_datetimelike = values.dtype.kind in ["m", "M"]
vals = values
if is_datetimelike and how == "std":
vals = vals.view("i8")
if pre_processing:
vals, inferences = pre_processing(vals)
vals = vals.astype(cython_dtype, copy=False)
if vals.ndim == 1:
vals = vals.reshape((-1, 1))
func = partial(func, values=vals)
if how != "std" or isinstance(values, BaseMaskedArray):
mask = isna(values).view(np.uint8)
if mask.ndim == 1:
mask = mask.reshape(-1, 1)
func = partial(func, mask=mask)
if how != "std":
is_nullable = isinstance(values, BaseMaskedArray)
func = partial(func, nullable=is_nullable)
elif isinstance(values, BaseMaskedArray):
result_mask = np.zeros(result.shape, dtype=np.bool_)
func = partial(func, result_mask=result_mask)
# Call func to modify result in place
if how == "std":
func(**kwargs, is_datetimelike=is_datetimelike)
else:
func(**kwargs)
if values.ndim == 1:
assert result.shape[1] == 1, result.shape
result = result[:, 0]
if post_processing:
pp_kwargs: dict[str, bool | np.ndarray] = {}
pp_kwargs["nullable"] = isinstance(values, BaseMaskedArray)
if how == "std" and pp_kwargs["nullable"]:
pp_kwargs["result_mask"] = result_mask
result = post_processing(result, inferences, **pp_kwargs)
if how == "std" and is_datetimelike:
values = cast("DatetimeArray | TimedeltaArray", values)
unit = values.unit
with warnings.catch_warnings():
# suppress "RuntimeWarning: invalid value encountered in cast"
warnings.filterwarnings("ignore")
result = result.astype(np.int64, copy=False)
result = result.view(f"m8[{unit}]")
return result.T
# Operate block-wise instead of column-by-column
mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name=how)
res_mgr = mgr.grouped_reduce(blk_func)
out = self._wrap_agged_manager(res_mgr)
return self._wrap_aggregated_output(out)
def shift(self, periods: int = 1, freq=None, axis: Axis = 0, fill_value=None):
"""
Shift each group by periods observations.
If freq is passed, the index will be increased using the periods and the freq.
Parameters
----------
periods : int, default 1
Number of periods to shift.
freq : str, optional
Frequency string.
axis : axis to shift, default 0
Shift direction.
fill_value : optional
The scalar value to use for newly introduced missing values.
Returns
-------
Series or DataFrame
Object shifted within each group.
See Also
--------
Index.shift : Shift values of Index.
"""
if freq is not None or axis != 0:
f = lambda x: x.shift(periods, freq, axis, fill_value)
return self._python_apply_general(f, self._selected_obj, is_transform=True)
ids, _, ngroups = self.grouper.group_info
res_indexer = np.zeros(len(ids), dtype=np.int64)
libgroupby.group_shift_indexer(res_indexer, ids, ngroups, periods)
obj = self._obj_with_exclusions
res = obj._reindex_with_indexers(
{self.axis: (obj.axes[self.axis], res_indexer)},
fill_value=fill_value,
allow_dups=True,
)
return res
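# Editor's note: an illustrative sketch (not part of the original source); `df` is
# hypothetical. Shifting happens within each group, so the leading rows of every
# group become NaN (or `fill_value`) rather than borrowing from the previous group.
# >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1, 2, 3, 4]})
# >>> df.groupby("key")["val"].shift(1)
# 0    NaN
# 1    1.0
# 2    NaN
# 3    3.0
# Name: val, dtype: float64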
def diff(self, periods: int = 1, axis: AxisInt = 0) -> NDFrameT:
"""
First discrete difference of element.
Calculates the difference of each element compared with another
element in the group (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
axis : axis to shift, default 0
Take difference over rows (0) or columns (1).
Returns
-------
Series or DataFrame
First differences.
"""
if axis != 0:
return self.apply(lambda x: x.diff(periods=periods, axis=axis))
obj = self._obj_with_exclusions
shifted = self.shift(periods=periods, axis=axis)
# GH45562 - to retain existing behavior and match behavior of Series.diff(),
# int8 and int16 are coerced to float32 rather than float64.
dtypes_to_f32 = ["int8", "int16"]
if obj.ndim == 1:
if obj.dtype in dtypes_to_f32:
shifted = shifted.astype("float32")
else:
to_coerce = [c for c, dtype in obj.dtypes.items() if dtype in dtypes_to_f32]
if len(to_coerce):
shifted = shifted.astype({c: "float32" for c in to_coerce})
return obj - shifted
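# Editor's note: an illustrative sketch (not part of the original source); `df` is
# hypothetical. diff() is obj - shift(periods) computed per group, so each group's
# first row is NaN.
# >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1, 3, 6, 10]})
# >>> df.groupby("key")["val"].diff()
# 0    NaN
# 1    2.0
# 2    NaN
# 3    4.0
# Name: val, dtype: float64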
def pct_change(
self,
periods: int = 1,
fill_method: FillnaOptions = "ffill",
limit=None,
freq=None,
axis: Axis = 0,
):
"""
Calculate pct_change of each value to previous entry in group.
Returns
-------
Series or DataFrame
Percentage changes within each group.
"""
# TODO(GH#23918): Remove this conditional for SeriesGroupBy when
# GH#23918 is fixed
if freq is not None or axis != 0:
f = lambda x: x.pct_change(
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
axis=axis,
)
return self._python_apply_general(f, self._selected_obj, is_transform=True)
if fill_method is None: # GH30463
fill_method = "ffill"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(
self.grouper.codes, axis=self.axis, group_keys=self.group_keys
)
shifted = fill_grp.shift(periods=periods, freq=freq, axis=self.axis)
return (filled / shifted) - 1
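# Editor's note: an illustrative sketch (not part of the original source); `df` is
# hypothetical. pct_change fills, shifts within the group, then returns
# filled / shifted - 1, so each group's first row is NaN.
# >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1.0, 2.0, 4.0, 6.0]})
# >>> df.groupby("key")["val"].pct_change()
# 0    NaN
# 1    1.0
# 2    NaN
# 3    0.5
# Name: val, dtype: float64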
def head(self, n: int = 5) -> NDFrameT:
"""
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Parameters
----------
n : int
If positive: number of entries to include from start of each group.
If negative: number of entries to exclude from end of each group.
Returns
-------
Series or DataFrame
Subset of original Series or DataFrame as determined by n.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
... columns=['A', 'B'])
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(-1)
A B
0 1 2
"""
mask = self._make_mask_from_positional_indexer(slice(None, n))
return self._mask_selected_obj(mask)
def tail(self, n: int = 5) -> NDFrameT:
"""
Return last n rows of each group.
Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Parameters
----------
n : int
If positive: number of entries to include from end of each group.
If negative: number of entries to exclude from start of each group.
Returns
-------
Series or DataFrame
Subset of original Series or DataFrame as determined by n.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
... columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').tail(-1)
A B
1 a 2
3 b 2
"""
if n:
mask = self._make_mask_from_positional_indexer(slice(-n, None))
else:
mask = self._make_mask_from_positional_indexer([])
return self._mask_selected_obj(mask)
def _mask_selected_obj(self, mask: npt.NDArray[np.bool_]) -> NDFrameT:
"""
Return _selected_obj with mask applied to the correct axis.
Parameters
----------
mask : np.ndarray[bool]
Boolean mask to apply.
Returns
-------
Series or DataFrame
Filtered _selected_obj.
"""
ids = self.grouper.group_info[0]
mask = mask & (ids != -1)
if self.axis == 0:
return self._selected_obj[mask]
else:
return self._selected_obj.iloc[:, mask]
def _reindex_output(
self,
output: OutputFrameOrSeries,
fill_value: Scalar = np.NaN,
qs: npt.NDArray[np.float64] | None = None,
) -> OutputFrameOrSeries:
"""
If we have categorical groupers, then we might want to make sure that
we have a fully re-indexed output to the levels. This means expanding
the output space to accommodate all values in the cartesian product of
our groups, regardless of whether they were observed in the data or
not. This will expand the output space if there are missing groups.
The method returns early without modifying the input if the number of
groupings is less than 2, self.observed == True or none of the groupers
are categorical.
Parameters
----------
output : Series or DataFrame
Object resulting from grouping and applying an operation.
fill_value : scalar, default np.NaN
Value to use for unobserved categories if self.observed is False.
qs : np.ndarray[float64] or None, default None
quantile values, only relevant for quantile.
Returns
-------
Series or DataFrame
Object (potentially) re-indexed to include all possible groups.
"""
groupings = self.grouper.groupings
if len(groupings) == 1:
return output
# if we only care about the observed values
# we are done
elif self.observed:
return output
# reindexing only applies to a Categorical grouper
elif not any(
isinstance(ping.grouping_vector, (Categorical, CategoricalIndex))
for ping in groupings
):
return output
levels_list = [ping.group_index for ping in groupings]
names = self.grouper.names
if qs is not None:
# error: Argument 1 to "append" of "list" has incompatible type
# "ndarray[Any, dtype[floating[_64Bit]]]"; expected "Index"
levels_list.append(qs) # type: ignore[arg-type]
names = names + [None]
index = MultiIndex.from_product(levels_list, names=names)
if self.sort:
index = index.sort_values()
if self.as_index:
# Always holds for SeriesGroupBy unless GH#36507 is implemented
d = {
self.obj._get_axis_name(self.axis): index,
"copy": False,
"fill_value": fill_value,
}
return output.reindex(**d) # type: ignore[arg-type]
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `output`. An idea is to do:
# output = output.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `output`, and then reset the in-axis grouper columns.
# Select in-axis groupers
in_axis_grps = list(
(i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis
)
if len(in_axis_grps) > 0:
g_nums, g_names = zip(*in_axis_grps)
output = output.drop(labels=list(g_names), axis=1)
# Set a temp index and reindex (possibly expanding)
output = output.set_index(self.grouper.result_index).reindex(
index, copy=False, fill_value=fill_value
)
# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
if len(in_axis_grps) > 0:
output = output.reset_index(level=g_nums)
return output.reset_index(drop=True)
def sample(
self,
n: int | None = None,
frac: float | None = None,
replace: bool = False,
weights: Sequence | Series | None = None,
random_state: RandomState | None = None,
):
"""
Return a random sample of items from each group.
You can use `random_state` for reproducibility.
.. versionadded:: 1.1.0
Parameters
----------
n : int, optional
Number of items to return for each group. Cannot be used with
`frac` and must be no larger than the smallest group unless
`replace` is True. Default is one if `frac` is None.
frac : float, optional
Fraction of items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : list-like, optional
Default None results in equal probability weighting.
If passed a list-like then values must have the same length as
the underlying DataFrame or Series object and will be used as
sampling probabilities after normalization within each group.
Values must be non-negative with at least one positive element
within each group.
random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional
If int, array-like, or BitGenerator, seed for random number generator.
If np.random.RandomState or np.random.Generator, use as given.
.. versionchanged:: 1.4.0
np.random.Generator objects now accepted
Returns
-------
Series or DataFrame
A new object of same type as caller containing items randomly
sampled within each group from the caller object.
See Also
--------
DataFrame.sample: Generate random samples from a DataFrame object.
numpy.random.choice: Generate a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame(
... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
... )
>>> df
a b
0 red 0
1 red 1
2 blue 2
3 blue 3
4 black 4
5 black 5
Select one row at random for each distinct value in column a. The
`random_state` argument can be used to guarantee reproducibility:
>>> df.groupby("a").sample(n=1, random_state=1)
a b
4 black 4
2 blue 2
1 red 1
Set `frac` to sample fixed proportions rather than counts:
>>> df.groupby("a")["b"].sample(frac=0.5, random_state=2)
5 5
2 2
0 0
Name: b, dtype: int64
Control sample probabilities within groups by setting weights:
>>> df.groupby("a").sample(
... n=1,
... weights=[1, 1, 1, 0, 0, 1],
... random_state=1,
... )
a b
5 black 5
2 blue 2
0 red 0
""" # noqa:E501
if self._selected_obj.empty:
# GH48459 prevent ValueError when object is empty
return self._selected_obj
size = sample.process_sampling_size(n, frac, replace)
if weights is not None:
weights_arr = sample.preprocess_weights(
self._selected_obj, weights, axis=self.axis
)
random_state = com.random_state(random_state)
group_iterator = self.grouper.get_iterator(self._selected_obj, self.axis)
sampled_indices = []
for labels, obj in group_iterator:
grp_indices = self.indices[labels]
group_size = len(grp_indices)
if size is not None:
sample_size = size
else:
assert frac is not None
sample_size = round(frac * group_size)
grp_sample = sample.sample(
group_size,
size=sample_size,
replace=replace,
weights=None if weights is None else weights_arr[grp_indices],
random_state=random_state,
)
sampled_indices.append(grp_indices[grp_sample])
sampled_indices = np.concatenate(sampled_indices)
return self._selected_obj.take(sampled_indices, axis=self.axis)
The provided code snippet includes necessary dependencies for implementing the `get_resampler_for_grouping` function. Write a Python function `def get_resampler_for_grouping( groupby: GroupBy, rule, how=None, fill_method=None, limit=None, kind=None, on=None, **kwargs, ) -> Resampler` to solve the following problem:
Return our appropriate resampler when grouping as well.
Here is the function:
def get_resampler_for_grouping(
groupby: GroupBy,
rule,
how=None,
fill_method=None,
limit=None,
kind=None,
on=None,
**kwargs,
) -> Resampler:
"""
Return our appropriate resampler when grouping as well.
"""
# .resample uses 'on' similar to how .groupby uses 'key'
tg = TimeGrouper(freq=rule, key=on, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
return resampler._get_resampler_for_grouping(groupby=groupby, key=tg.key) | Return our appropriate resampler when grouping as well. |
173,235 | from __future__ import annotations
import copy
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
Literal,
cast,
final,
no_type_check,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
Period,
Timedelta,
Timestamp,
to_offset,
)
from pandas._typing import (
AnyArrayLike,
Axis,
AxisInt,
Frequency,
IndexLabel,
NDFrameT,
QuantileInterpolation,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvertibleTypes,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.algorithms as algos
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.groupby.groupby import (
BaseGroupBy,
GroupBy,
_pipe_template,
get_groupby,
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
)
from pandas.core.indexes.period import (
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import (
TimedeltaIndex,
timedelta_range,
)
from pandas.tseries.frequencies import (
is_subperiod,
is_superperiod,
)
from pandas.tseries.offsets import (
Day,
Tick,
)
NDFrameT = TypeVar("NDFrameT", bound="NDFrame")
AxisInt = int
ABCSeries = cast(
"Type[Series]",
create_pandas_abc_type("ABCSeries", "_typ", ("series",)),
)
ABCDataFrame = cast(
"Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
)
def _take_new_index(
obj: NDFrameT, indexer: npt.NDArray[np.intp], new_index: Index, axis: AxisInt = 0
) -> NDFrameT:
if isinstance(obj, ABCSeries):
new_values = algos.take_nd(obj._values, indexer)
# error: Incompatible return value type (got "Series", expected "NDFrameT")
return obj._constructor( # type: ignore[return-value]
new_values, index=new_index, name=obj.name
)
elif isinstance(obj, ABCDataFrame):
if axis == 1:
raise NotImplementedError("axis 1 is not supported")
new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)
# error: Incompatible return value type
# (got "DataFrame", expected "NDFrameT")
return obj._constructor(new_mgr) # type: ignore[return-value]
else:
raise ValueError("'obj' should be either a Series or a DataFrame") | null |
173,236 | from __future__ import annotations
import copy
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
Literal,
cast,
final,
no_type_check,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
Period,
Timedelta,
Timestamp,
to_offset,
)
from pandas._typing import (
AnyArrayLike,
Axis,
AxisInt,
Frequency,
IndexLabel,
NDFrameT,
QuantileInterpolation,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvertibleTypes,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.algorithms as algos
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.groupby.groupby import (
BaseGroupBy,
GroupBy,
_pipe_template,
get_groupby,
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
)
from pandas.core.indexes.period import (
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import (
TimedeltaIndex,
timedelta_range,
)
from pandas.tseries.frequencies import (
is_subperiod,
is_superperiod,
)
from pandas.tseries.offsets import (
Day,
Tick,
)
def _get_timestamp_range_edges(
first: Timestamp,
last: Timestamp,
freq: BaseOffset,
unit: str,
closed: Literal["right", "left"] = "left",
origin: TimeGrouperOrigin = "start_day",
offset: Timedelta | None = None,
) -> tuple[Timestamp, Timestamp]:
"""
Adjust the `first` Timestamp to the preceding Timestamp that resides on
the provided offset. Adjust the `last` Timestamp to the following
Timestamp that resides on the provided offset. Input Timestamps that
already reside on the offset will be adjusted depending on the type of
offset and the `closed` parameter.
Parameters
----------
first : pd.Timestamp
The beginning Timestamp of the range to be adjusted.
last : pd.Timestamp
The ending Timestamp of the range to be adjusted.
freq : pd.DateOffset
The dateoffset to which the Timestamps will be adjusted.
closed : {'right', 'left'}, default "left"
Which side of bin interval is closed.
origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
If a timestamp is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
offset : pd.Timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
A tuple of length 2, containing the adjusted pd.Timestamp objects.
"""
if isinstance(freq, Tick):
index_tz = first.tz
if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):
raise ValueError("The origin must have the same timezone as the index.")
if origin == "epoch":
# set the epoch based on the timezone to have similar bins results when
# resampling on the same kind of indexes on different timezones
origin = Timestamp("1970-01-01", tz=index_tz)
if isinstance(freq, Day):
# _adjust_dates_anchored assumes 'D' means 24H, but first/last
# might contain a DST transition (23H, 24H, or 25H).
# So "pretend" the dates are naive when adjusting the endpoints
first = first.tz_localize(None)
last = last.tz_localize(None)
if isinstance(origin, Timestamp):
origin = origin.tz_localize(None)
first, last = _adjust_dates_anchored(
first, last, freq, closed=closed, origin=origin, offset=offset, unit=unit
)
if isinstance(freq, Day):
first = first.tz_localize(index_tz)
last = last.tz_localize(index_tz)
else:
first = first.normalize()
last = last.normalize()
if closed == "left":
first = Timestamp(freq.rollback(first))
else:
first = Timestamp(first - freq)
last = Timestamp(last + freq)
return first, last
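# Editor's note: an illustrative sketch (not part of the original source). For a Tick
# frequency the returned edges are snapped onto the grid implied by `origin`; the
# timestamps below are hypothetical and the expected result is the editor's reading
# of the code above, not verified output.
# first, last = _get_timestamp_range_edges(
#     Timestamp("2000-01-01 01:30"), Timestamp("2000-01-01 06:45"),
#     to_offset("2H"), unit="ns", closed="left", origin="start_day",
# )
# expected: first == Timestamp("2000-01-01 00:00"), last == Timestamp("2000-01-01 08:00"),
# i.e. the range is widened so every observation falls inside a complete 2-hour bin.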
Literal: _SpecialForm = ...
TimeGrouperOrigin = Union[
"Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"]
]
The provided code snippet includes necessary dependencies for implementing the `_get_period_range_edges` function. Write a Python function `def _get_period_range_edges( first: Period, last: Period, freq: BaseOffset, closed: Literal["right", "left"] = "left", origin: TimeGrouperOrigin = "start_day", offset: Timedelta | None = None, ) -> tuple[Period, Period]` to solve the following problem:
Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. freq : pd.DateOffset The freq to which the Periods will be adjusted. closed : {'right', 'left'}, default "left" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects.
Here is the function:
def _get_period_range_edges(
first: Period,
last: Period,
freq: BaseOffset,
closed: Literal["right", "left"] = "left",
origin: TimeGrouperOrigin = "start_day",
offset: Timedelta | None = None,
) -> tuple[Period, Period]:
"""
Adjust the provided `first` and `last` Periods to the respective Period of
the given offset that encompasses them.
Parameters
----------
first : pd.Period
The beginning Period of the range to be adjusted.
last : pd.Period
The ending Period of the range to be adjusted.
freq : pd.DateOffset
The freq to which the Periods will be adjusted.
closed : {'right', 'left'}, default "left"
Which side of bin interval is closed.
origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
If a timestamp is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
offset : pd.Timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
A tuple of length 2, containing the adjusted pd.Period objects.
"""
if not all(isinstance(obj, Period) for obj in [first, last]):
raise TypeError("'first' and 'last' must be instances of type Period")
# GH 23882
first_ts = first.to_timestamp()
last_ts = last.to_timestamp()
adjust_first = not freq.is_on_offset(first_ts)
adjust_last = freq.is_on_offset(last_ts)
first_ts, last_ts = _get_timestamp_range_edges(
first_ts, last_ts, freq, unit="ns", closed=closed, origin=origin, offset=offset
)
first = (first_ts + int(adjust_first) * freq).to_period(freq)
last = (last_ts - int(adjust_last) * freq).to_period(freq)
return first, last | Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. freq : pd.DateOffset The freq to which the Periods will be adjusted. closed : {'right', 'left'}, default "left" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects. |
173,237 | from __future__ import annotations
import copy
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
Literal,
cast,
final,
no_type_check,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
Period,
Timedelta,
Timestamp,
to_offset,
)
from pandas._typing import (
AnyArrayLike,
Axis,
AxisInt,
Frequency,
IndexLabel,
NDFrameT,
QuantileInterpolation,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvertibleTypes,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.algorithms as algos
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.groupby.groupby import (
BaseGroupBy,
GroupBy,
_pipe_template,
get_groupby,
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
)
from pandas.core.indexes.period import (
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import (
TimedeltaIndex,
timedelta_range,
)
from pandas.tseries.frequencies import (
is_subperiod,
is_superperiod,
)
from pandas.tseries.offsets import (
Day,
Tick,
)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_data: PeriodArray
freq: BaseOffset
dtype: PeriodDtype
_data_cls = PeriodArray
_supports_partial_string_indexing = True
def _engine_type(self) -> type[libindex.PeriodEngine]:
return libindex.PeriodEngine
def _resolution_obj(self) -> Resolution:
# for compat with DatetimeIndex
return self.dtype._resolution_obj
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
    PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
def to_timestamp(self, freq=None, how: str = "start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
def hour(self) -> Index:
return Index(self._data.hour, name=self.name)
def minute(self) -> Index:
return Index(self._data.minute, name=self.name)
def second(self) -> Index:
return Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
refs = None
if not copy and isinstance(data, (Index, ABCSeries)):
refs = data._references
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
if not fields:
# test_pickle_compat_construction
cls._raise_scalar_data_error(None)
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
# PeriodArray._generate range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name, refs=refs)
# ------------------------------------------------------------------------
# Data
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]:
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
# For the subset of DateOffsets that can be a dtype.freq, it
# suffices (and is much faster) to compare the dtype_code rather than
# the freq itself.
# See also: PeriodDtype.__eq__
freq = dtype.freq
own_freq = self.freq
return (
freq._period_dtype_code
# error: "BaseOffset" has no attribute "_period_dtype_code"
== own_freq._period_dtype_code # type: ignore[attr-defined]
and freq.n == own_freq.n
)
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return bool(((values[1:] - values[:-1]) < 2).all())
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
self._check_indexing_error(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
parsed, reso = self._parse_with_reso(key)
except ValueError as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
if self._can_partial_date_slice(reso):
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
if reso == self._resolution_obj:
# the reso < self._resolution_obj case goes
# through _get_string_slice
key = self._cast_partial_indexing_scalar(parsed)
else:
raise KeyError(key)
elif isinstance(key, Period):
self._disallow_mismatched_indexing(key)
elif isinstance(key, datetime):
key = self._cast_partial_indexing_scalar(key)
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
return Index.get_loc(self, key)
except KeyError as err:
raise KeyError(orig_key) from err
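# Editor's note: an illustrative sketch (not part of the original source); `idx` is
# hypothetical. A string that parses as a Period at the index's own resolution
# resolves to an integer position.
# >>> idx = pd.period_range("2000-01", periods=3, freq="M")
# >>> idx.get_loc("2000-02")
# 1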
def _disallow_mismatched_indexing(self, key: Period) -> None:
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
# error: "BaseOffset" has no attribute "_period_dtype_code"
and sfreq._period_dtype_code # type: ignore[attr-defined]
# error: "BaseOffset" has no attribute "_period_dtype_code"
== kfreq._period_dtype_code # type: ignore[attr-defined]
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
def _cast_partial_indexing_scalar(self, label: datetime) -> Period:
try:
period = Period(label, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(label) from err
return period
def _maybe_cast_slice_bound(self, label, side: str):
if isinstance(label, datetime):
label = self._cast_partial_indexing_scalar(label)
return super()._maybe_cast_slice_bound(label, side)
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
iv = Period(parsed, freq=reso.attr_abbrev)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def shift(self, periods: int = 1, freq=None):
if freq is not None:
raise TypeError(
f"`freq` argument is not supported for {type(self).__name__}.shift"
)
return self + periods
def _insert_nat_bin(
binner: PeriodIndex, bins: np.ndarray, labels: PeriodIndex, nat_count: int
) -> tuple[PeriodIndex, np.ndarray, PeriodIndex]:
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
# shift bins by the number of NaT
assert nat_count > 0
bins += nat_count
bins = np.insert(bins, 0, nat_count)
# Incompatible types in assignment (expression has type "Index", variable
# has type "PeriodIndex")
binner = binner.insert(0, NaT) # type: ignore[assignment]
# Incompatible types in assignment (expression has type "Index", variable
# has type "PeriodIndex")
labels = labels.insert(0, NaT) # type: ignore[assignment]
return binner, bins, labels | null |
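A brief usage sketch of the behaviour described in the get_loc docstring above; it only uses the public PeriodIndex API, so it should run against a released pandas:
import pandas as pd
pi = pd.period_range("2021-01", periods=3, freq="M")
pi.get_loc("2021-02")                   # 1: the string is parsed as a Period at the index's resolution
pi.get_loc(pd.Period("2021-02", "M"))   # 1: exact Period match
# pi.get_loc(5)                         # KeyError: integers are rejected rather than cast to Period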
173,238 | from __future__ import annotations
import copy
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
Literal,
cast,
final,
no_type_check,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
Period,
Timedelta,
Timestamp,
to_offset,
)
from pandas._typing import (
AnyArrayLike,
Axis,
AxisInt,
Frequency,
IndexLabel,
NDFrameT,
QuantileInterpolation,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvertibleTypes,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.algorithms as algos
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.groupby.groupby import (
BaseGroupBy,
GroupBy,
_pipe_template,
get_groupby,
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
)
from pandas.core.indexes.period import (
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import (
TimedeltaIndex,
timedelta_range,
)
from pandas.tseries.frequencies import (
is_subperiod,
is_superperiod,
)
from pandas.tseries.offsets import (
Day,
Tick,
)
def find_stack_level() -> int:
"""
Find the first place in the stack that is not inside pandas
(tests notwithstanding).
"""
    import inspect
    import os
    import pandas as pd
pkg_dir = os.path.dirname(pd.__file__)
test_dir = os.path.join(pkg_dir, "tests")
# https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
frame = inspect.currentframe()
n = 0
while frame:
fname = inspect.getfile(frame)
if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
frame = frame.f_back
n += 1
else:
break
return n
The provided code snippet includes necessary dependencies for implementing the `maybe_warn_args_and_kwargs` function. Write a Python function `def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None` to solve the following problem:
Warn for deprecation of args and kwargs in resample functions. Parameters ---------- cls : type Class to warn about. kernel : str Operation name. args : tuple or None args passed by user. Will be None if and only if kernel does not have args. kwargs : dict or None kwargs passed by user. Will be None if and only if kernel does not have kwargs.
Here is the function:
def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None:
"""
Warn for deprecation of args and kwargs in resample functions.
Parameters
----------
cls : type
Class to warn about.
kernel : str
Operation name.
args : tuple or None
args passed by user. Will be None if and only if kernel does not have args.
kwargs : dict or None
kwargs passed by user. Will be None if and only if kernel does not have kwargs.
"""
warn_args = args is not None and len(args) > 0
warn_kwargs = kwargs is not None and len(kwargs) > 0
if warn_args and warn_kwargs:
msg = "args and kwargs"
elif warn_args:
msg = "args"
elif warn_kwargs:
msg = "kwargs"
else:
return
warnings.warn(
f"Passing additional {msg} to {cls.__name__}.{kernel} has "
"no impact on the result and is deprecated. This will "
"raise a TypeError in a future version of pandas.",
category=FutureWarning,
stacklevel=find_stack_level(),
) | Warn for deprecation of args and kwargs in resample functions. Parameters ---------- cls : type Class to warn about. kernel : str Operation name. args : tuple or None args passed by user. Will be None if and only if kernel does not have args. kwargs : dict or None kwargs passed by user. Will be None if and only if kernel does not have kwargs. |
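A small usage sketch, assuming maybe_warn_args_and_kwargs from the snippet above is in scope; the Resampler stand-in class is invented for illustration:
import warnings
class Resampler:  # stand-in for the real resampler class
    pass
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    maybe_warn_args_and_kwargs(Resampler, "sum", args=(1,), kwargs=None)   # warns
    maybe_warn_args_and_kwargs(Resampler, "mean", args=(), kwargs={})      # no-op, nothing extra passed
assert len(caught) == 1
assert "Passing additional args to Resampler.sum" in str(caught[0].message)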
173,239 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
The provided code snippet includes necessary dependencies for implementing the `check_value_size` function. Write a Python function `def check_value_size(value, mask: npt.NDArray[np.bool_], length: int)` to solve the following problem:
Validate the size of the values passed to ExtensionArray.fillna.
Here is the function:
def check_value_size(value, mask: npt.NDArray[np.bool_], length: int):
"""
Validate the size of the values passed to ExtensionArray.fillna.
"""
if is_array_like(value):
if len(value) != length:
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {length}"
)
value = value[mask]
return value | Validate the size of the values passed to ExtensionArray.fillna. |
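For example, assuming the snippet above is in scope: an array-like value must match the length of the data being filled and only the masked positions are kept, while scalars pass through untouched:
import numpy as np
value = np.array([10, 20, 30, 40])
mask = np.array([False, True, False, True])
check_value_size(value, mask, length=4)    # array([20, 40])
check_value_size(5, mask, length=4)        # 5 (scalars are returned unchanged)
# check_value_size(value, mask, length=3)  # would raise ValueError: lengths differ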
173,240 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
ArrayLike = Union["ExtensionArray", np.ndarray]
def infer_dtype_from(val, pandas_dtype: bool = False) -> tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, scalar/array belongs to pandas extension types is inferred as
object
"""
if not is_list_like(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def is_object_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the object dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the object dtype.
Examples
--------
>>> from pandas.api.types import is_object_dtype
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.object_))
def is_numeric_v_string_like(a: ArrayLike, b) -> bool:
"""
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
and scalar string-likes.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
        Whether we are comparing a string-like object to a numeric array.
Examples
--------
>>> is_numeric_v_string_like(np.array([1]), "foo")
True
>>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
True
>>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
True
>>> is_numeric_v_string_like(np.array([1]), np.array([2]))
False
>>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
False
"""
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b")
is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b")
is_a_string_array = is_a_array and a.dtype.kind in ("S", "U")
is_b_string_array = is_b_array and b.dtype.kind in ("S", "U")
is_b_scalar_string_like = not is_b_array and isinstance(b, str)
return (
(is_a_numeric_array and is_b_scalar_string_like)
or (is_a_numeric_array and is_b_string_array)
or (is_b_numeric_array and is_a_string_array)
)
def isna(obj: Scalar) -> bool:
...
def isna(
obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
...
def isna(obj: NDFrameT) -> NDFrameT:
...
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
The provided code snippet includes necessary dependencies for implementing the `mask_missing` function. Write a Python function `def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]` to solve the following problem:
Return a masking array of same size/shape as arr with entries equaling any member of values_to_mask set to True Parameters ---------- arr : ArrayLike values_to_mask: list, tuple, or scalar Returns ------- np.ndarray[bool]
Here is the function:
def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
Parameters
----------
arr : ArrayLike
values_to_mask: list, tuple, or scalar
Returns
-------
np.ndarray[bool]
"""
# When called from Block.replace/replace_list, values_to_mask is a scalar
# known to be holdable by arr.
# When called from Series._single_replace, values_to_mask is tuple or list
dtype, values_to_mask = infer_dtype_from(values_to_mask)
# error: Argument "dtype" to "array" has incompatible type "Union[dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
values_to_mask = np.array(values_to_mask, dtype=dtype) # type: ignore[arg-type]
potential_na = False
if is_object_dtype(arr):
# pre-compute mask to avoid comparison to NA
potential_na = True
arr_mask = ~isna(arr)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
# GH 21977
mask = np.zeros(arr.shape, dtype=bool)
for x in nonna:
if is_numeric_v_string_like(arr, x):
# GH#29553 prevent numpy deprecation warnings
pass
else:
if potential_na:
new_mask = np.zeros(arr.shape, dtype=np.bool_)
new_mask[arr_mask] = arr[arr_mask] == x
else:
new_mask = arr == x
if not isinstance(new_mask, np.ndarray):
# usually BooleanArray
new_mask = new_mask.to_numpy(dtype=bool, na_value=False)
mask |= new_mask
if na_mask.any():
mask |= isna(arr)
return mask | Return a masking array of same size/shape as arr with entries equaling any member of values_to_mask set to True Parameters ---------- arr : ArrayLike values_to_mask: list, tuple, or scalar Returns ------- np.ndarray[bool] |
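A small worked example, assuming mask_missing from the snippet above is in scope (it lives in pandas' private pandas.core.missing module): masking both a concrete value and NaN in an object-dtype array:
import numpy as np
arr = np.array([1.0, 2.0, np.nan, 4.0], dtype=object)
mask_missing(arr, [2.0, np.nan])
# array([False,  True,  True, False]) -- position 1 matches 2.0, position 2 matches the NaN member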
173,241 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
def clean_fill_method(method: str | None, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
return method
def _interpolate_2d_with_fill(
data: np.ndarray, # floating dtype
index: Index,
axis: AxisInt,
method: str = "linear",
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
**kwargs,
) -> None:
"""
Column-wise application of _interpolate_1d.
Notes
-----
Alters 'data' in-place.
The signature does differ from _interpolate_1d because it only
includes what is needed for Block.interpolate.
"""
# validate the interp method
clean_interp_method(method, index, **kwargs)
if is_valid_na_for_dtype(fill_value, data.dtype):
fill_value = na_value_for_dtype(data.dtype, compat=False)
if method == "time":
if not needs_i8_conversion(index.dtype):
raise ValueError(
"time-weighted interpolation only works "
"on Series or DataFrames with a "
"DatetimeIndex"
)
method = "values"
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
raise ValueError(
"Invalid limit_direction: expecting one of "
f"{valid_limit_directions}, got '{limit_direction}'."
)
if limit_area is not None:
valid_limit_areas = ["inside", "outside"]
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError(
f"Invalid limit_area: expecting one of {valid_limit_areas}, got "
f"{limit_area}."
)
# default limit is unlimited GH #16282
limit = algos.validate_limit(nobs=None, limit=limit)
indices = _index_to_interp_indices(index, method)
def func(yvalues: np.ndarray) -> None:
# process 1-d slices in the axis direction
_interpolate_1d(
indices=indices,
yvalues=yvalues,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
bounds_error=False,
**kwargs,
)
# error: Argument 1 to "apply_along_axis" has incompatible type
# "Callable[[ndarray[Any, Any]], None]"; expected "Callable[...,
# Union[_SupportsArray[dtype[<nothing>]], Sequence[_SupportsArray
# [dtype[<nothing>]]], Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
# Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
# Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
np.apply_along_axis(func, axis, data) # type: ignore[arg-type]
def interpolate_2d(
values: np.ndarray,
method: str = "pad",
axis: Axis = 0,
limit: int | None = None,
limit_area: str | None = None,
) -> None:
"""
    Perform an actual interpolation of values; values will be made 2-d if
    needed. Fills in place and returns the result.
Parameters
----------
values: np.ndarray
Input array.
method: str, default "pad"
Interpolation method. Could be "bfill" or "pad"
axis: 0 or 1
Interpolation axis
limit: int, optional
Index limit on interpolation.
limit_area: str, optional
Limit area for interpolation. Can be "inside" or "outside"
Notes
-----
Modifies values in-place.
"""
if limit_area is not None:
np.apply_along_axis(
# error: Argument 1 to "apply_along_axis" has incompatible type
# "partial[None]"; expected
# "Callable[..., Union[_SupportsArray[dtype[<nothing>]],
# Sequence[_SupportsArray[dtype[<nothing>]]],
# Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
# Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
# Sequence[Sequence[Sequence[Sequence[_
# SupportsArray[dtype[<nothing>]]]]]]]]"
partial( # type: ignore[arg-type]
_interpolate_with_limit_area,
method=method,
limit=limit,
limit_area=limit_area,
),
# error: Argument 2 to "apply_along_axis" has incompatible type
# "Union[str, int]"; expected "SupportsIndex"
axis, # type: ignore[arg-type]
values,
)
return
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
values = values.reshape(tuple((1,) + values.shape))
method = clean_fill_method(method)
tvalues = transf(values)
# _pad_2d and _backfill_2d both modify tvalues inplace
if method == "pad":
_pad_2d(tvalues, limit=limit)
else:
_backfill_2d(tvalues, limit=limit)
return
Any = object()
AxisInt = int
The provided code snippet includes necessary dependencies for implementing the `interpolate_array_2d` function. Write a Python function `def interpolate_array_2d( data: np.ndarray, method: str = "pad", axis: AxisInt = 0, index: Index | None = None, limit: int | None = None, limit_direction: str = "forward", limit_area: str | None = None, fill_value: Any | None = None, coerce: bool = False, downcast: str | None = None, **kwargs, ) -> None` to solve the following problem:
Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill. Notes ----- Alters 'data' in-place.
Here is the function:
def interpolate_array_2d(
data: np.ndarray,
method: str = "pad",
axis: AxisInt = 0,
index: Index | None = None,
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
coerce: bool = False,
downcast: str | None = None,
**kwargs,
) -> None:
"""
Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill.
Notes
-----
Alters 'data' in-place.
"""
try:
m = clean_fill_method(method)
except ValueError:
m = None
if m is not None:
if fill_value is not None:
# similar to validate_fillna_kwargs
raise ValueError("Cannot pass both fill_value and method")
interpolate_2d(
data,
method=m,
axis=axis,
limit=limit,
limit_area=limit_area,
)
else:
assert index is not None # for mypy
_interpolate_2d_with_fill(
data=data,
index=index,
axis=axis,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
**kwargs,
) | Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill. Notes ----- Alters 'data' in-place. |
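A hedged sketch of the dispatch, assuming the full private pandas.core.missing module is available (the snippet above omits _pad_2d and friends): a fill method such as "pad" takes the interpolate_2d branch and fills in place, honouring limit:
import numpy as np
data = np.array([[1.0, np.nan, np.nan, 4.0]])
interpolate_array_2d(data, method="pad", axis=0, limit=1)
# data was modified in place -> [[1.0, 1.0, nan, 4.0]]; only one NaN filled because limit=1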
173,242 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ...
def cast(typ: Type[_T], val: Any) -> _T: ...
def cast(typ: str, val: Any) -> Any: ...
def cast(typ: object, val: Any) -> Any: ...
F = TypeVar("F", bound=FuncType)
def needs_i8_conversion(arr_or_dtype) -> bool:
"""
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
or dtype is of a datetime-like dtype
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
"""
if arr_or_dtype is None:
return False
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.kind in ["m", "M"]
elif isinstance(arr_or_dtype, ExtensionDtype):
return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype))
try:
dtype = get_dtype(arr_or_dtype)
except (TypeError, ValueError):
return False
if isinstance(dtype, np.dtype):
return dtype.kind in ["m", "M"]
return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))
def isna(obj: Scalar) -> bool:
...
def isna(
obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
...
def isna(obj: NDFrameT) -> NDFrameT:
...
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
The provided code snippet includes necessary dependencies for implementing the `_datetimelike_compat` function. Write a Python function `def _datetimelike_compat(func: F) -> F` to solve the following problem:
Wrapper to handle datetime64 and timedelta64 dtypes.
Here is the function:
def _datetimelike_compat(func: F) -> F:
"""
Wrapper to handle datetime64 and timedelta64 dtypes.
"""
@wraps(func)
def new_func(values, limit=None, mask=None):
if needs_i8_conversion(values.dtype):
if mask is None:
# This needs to occur before casting to int64
mask = isna(values)
result, mask = func(values.view("i8"), limit=limit, mask=mask)
return result.view(values.dtype), mask
return func(values, limit=limit, mask=mask)
return cast(F, new_func) | Wrapper to handle datetime64 and timedelta64 dtypes. |
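To illustrate the decorator with a toy fill function (not the real _pad_1d), assuming _datetimelike_compat is importable from pandas' private pandas.core.missing module in recent versions: datetime64 input is viewed as int64 before the wrapped function runs, then viewed back:
import numpy as np
@_datetimelike_compat
def fill_with_zero(values, limit=None, mask=None):
    values = values.copy()
    values[mask] = 0           # 0 becomes the epoch once viewed back as datetime64[ns]
    return values, mask
arr = np.array(["2021-01-01", "NaT"], dtype="datetime64[ns]")
filled, mask = fill_with_zero(arr)
# filled.dtype == datetime64[ns]; the NaT slot is now 1970-01-01, mask is [False, True]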
173,243 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
def _fillna_prep(
values, mask: npt.NDArray[np.bool_] | None = None
) -> npt.NDArray[np.bool_]:
# boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
return mask
def _pad_1d(
values: np.ndarray,
limit: int | None = None,
mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
mask = _fillna_prep(values, mask)
algos.pad_inplace(values, mask, limit=limit)
return values, mask | null |
173,244 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
def _fillna_prep(
values, mask: npt.NDArray[np.bool_] | None = None
) -> npt.NDArray[np.bool_]:
# boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
return mask
def _backfill_1d(
values: np.ndarray,
limit: int | None = None,
mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
mask = _fillna_prep(values, mask)
algos.backfill_inplace(values, mask, limit=limit)
return values, mask | null |
173,245 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
def clean_fill_method(method: str | None, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
return method
def _pad_2d(values: np.ndarray, limit=None, mask: npt.NDArray[np.bool_] | None = None):
mask = _fillna_prep(values, mask)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values, mask
def _backfill_2d(values, limit=None, mask: npt.NDArray[np.bool_] | None = None):
mask = _fillna_prep(values, mask)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values, mask
_fill_methods = {"pad": _pad_1d, "backfill": _backfill_1d}
def get_fill_func(method, ndim: int = 1):
method = clean_fill_method(method)
if ndim == 1:
return _fill_methods[method]
return {"pad": _pad_2d, "backfill": _backfill_2d}[method] | null |
173,246 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
AxisInt,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
def clean_fill_method(method: str | None, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
return method
def clean_reindex_fill_method(method) -> str | None:
return clean_fill_method(method, allow_nearest=True) | null |
173,247 | from __future__ import annotations
import ast
from functools import (
partial,
reduce,
)
from keyword import iskeyword
import tokenize
from typing import (
Callable,
TypeVar,
)
import numpy as np
from pandas.compat import PY39
from pandas.errors import UndefinedVariableError
import pandas.core.common as com
from pandas.core.computation.ops import (
ARITH_OPS_SYMS,
BOOL_OPS_SYMS,
CMP_OPS_SYMS,
LOCAL_TAG,
MATHOPS,
REDUCTIONS,
UNARY_OPS_SYMS,
BinOp,
Constant,
Div,
FuncNode,
Op,
Term,
UnaryOp,
is_term,
)
from pandas.core.computation.parsing import (
clean_backtick_quoted_toks,
tokenize_string,
)
from pandas.core.computation.scope import Scope
from pandas.io.formats import printing
def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]:
"""
Rewrite the assignment operator for PyTables expressions that use ``=``
as a substitute for ``==``.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
return toknum, "==" if tokval == "=" else tokval
def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]:
"""
Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
if toknum == tokenize.OP:
if tokval == "&":
return tokenize.NAME, "and"
elif tokval == "|":
return tokenize.NAME, "or"
return toknum, tokval
return toknum, tokval
def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:
"""
Replace local variables with a syntactically valid name.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tuple of int, str
Either the input or token or the replacement values
Notes
-----
This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
"""
toknum, tokval = tok
if toknum == tokenize.OP and tokval == "@":
return tokenize.OP, LOCAL_TAG
return toknum, tokval
def _compose2(f, g):
    """
    Compose 2 callables.
    """
    # Helper required by _compose below; supplied here because the snippet omits it.
    return lambda *args, **kwargs: f(g(*args, **kwargs))
def _compose(*funcs):
    """
    Compose 2 or more callables.
    """
    assert len(funcs) > 1, "At least 2 callables must be passed to compose"
    return reduce(_compose2, funcs)
def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]:
"""
Clean up a column name if surrounded by backticks.
    Backtick quoted strings are indicated by a certain tokval value. If a string
    is a backtick quoted token it will be processed by
:func:`_create_valid_python_identifier` so that the parser can find this
string when the query is executed.
In this case the tok will get the NAME tokval.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tok : Tuple[int, str]
Either the input or token or the replacement values
"""
toknum, tokval = tok
if toknum == BACKTICK_QUOTED_STRING:
return tokenize.NAME, create_valid_python_identifier(tokval)
return toknum, tokval
def tokenize_string(source: str) -> Iterator[tuple[int, str]]:
"""
Tokenize a Python source code string.
Parameters
----------
source : str
The Python source code string.
Returns
-------
tok_generator : Iterator[Tuple[int, str]]
        An iterator yielding all tokens with only toknum and tokval (Tuple[int, str]).
"""
line_reader = StringIO(source).readline
token_generator = tokenize.generate_tokens(line_reader)
# Loop over all tokens till a backtick (`) is found.
# Then, take all tokens till the next backtick to form a backtick quoted string
for toknum, tokval, start, _, _ in token_generator:
if tokval == "`":
try:
yield tokenize_backtick_quoted_string(
token_generator, source, string_start=start[1] + 1
)
except Exception as err:
raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err
else:
yield toknum, tokval
The provided code snippet includes necessary dependencies for implementing the `_preparse` function. Write a Python function `def _preparse( source: str, f=_compose( _replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks ), ) -> str` to solve the following problem:
Compose a collection of tokenization functions. Parameters ---------- source : str A Python source code string f : callable This takes a tuple of (toknum, tokval) as its argument and returns a tuple with the same structure but possibly different elements. Defaults to the composition of ``_rewrite_assign``, ``_replace_booleans``, and ``_replace_locals``. Returns ------- str Valid Python source code Notes ----- The `f` parameter can be any callable that takes *and* returns input of the form ``(toknum, tokval)``, where ``toknum`` is one of the constants from the ``tokenize`` module and ``tokval`` is a string.
Here is the function:
def _preparse(
source: str,
f=_compose(
_replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks
),
) -> str:
"""
Compose a collection of tokenization functions.
Parameters
----------
source : str
A Python source code string
f : callable
This takes a tuple of (toknum, tokval) as its argument and returns a
tuple with the same structure but possibly different elements. Defaults
to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
``_replace_locals``.
Returns
-------
str
Valid Python source code
Notes
-----
The `f` parameter can be any callable that takes *and* returns input of the
form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
the ``tokenize`` module and ``tokval`` is a string.
"""
assert callable(f), "f must be callable"
return tokenize.untokenize(f(x) for x in tokenize_string(source)) | Compose a collection of tokenization functions. Parameters ---------- source : str A Python source code string f : callable This takes a tuple of (toknum, tokval) as its argument and returns a tuple with the same structure but possibly different elements. Defaults to the composition of ``_rewrite_assign``, ``_replace_booleans``, and ``_replace_locals``. Returns ------- str Valid Python source code Notes ----- The `f` parameter can be any callable that takes *and* returns input of the form ``(toknum, tokval)``, where ``toknum`` is one of the constants from the ``tokenize`` module and ``tokval`` is a string. |
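A sketch of what the default composition does, assuming the full pandas.core.computation.expr module context (the snippet omits a few constants); the untokenized output's exact whitespace may differ:
_preparse("@price > 100 & name = 'widget'")
# roughly: "__pd_eval_local_price >100 and name =='widget'"
#   '@' -> the local-variable tag   (_replace_locals)
#   '&' -> 'and'                    (_replace_booleans)
#   '=' -> '=='                     (_rewrite_assign)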
173,248 | from __future__ import annotations
import ast
from functools import (
partial,
reduce,
)
from keyword import iskeyword
import tokenize
from typing import (
Callable,
TypeVar,
)
import numpy as np
from pandas.compat import PY39
from pandas.errors import UndefinedVariableError
import pandas.core.common as com
from pandas.core.computation.ops import (
ARITH_OPS_SYMS,
BOOL_OPS_SYMS,
CMP_OPS_SYMS,
LOCAL_TAG,
MATHOPS,
REDUCTIONS,
UNARY_OPS_SYMS,
BinOp,
Constant,
Div,
FuncNode,
Op,
Term,
UnaryOp,
is_term,
)
from pandas.core.computation.parsing import (
clean_backtick_quoted_toks,
tokenize_string,
)
from pandas.core.computation.scope import Scope
from pandas.io.formats import printing
The provided code snippet includes necessary dependencies for implementing the `_is_type` function. Write a Python function `def _is_type(t)` to solve the following problem:
Factory for a type checking function of type ``t`` or tuple of types.
Here is the function:
def _is_type(t):
"""
Factory for a type checking function of type ``t`` or tuple of types.
"""
return lambda x: isinstance(x.value, t) | Factory for a type checking function of type ``t`` or tuple of types. |
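For example, the factory just closes over the type(s) and checks a term-like object's .value attribute:
from collections import namedtuple
Term = namedtuple("Term", "value")   # stand-in for a real Term node, for illustration only
_is_str = _is_type(str)
_is_num = _is_type((int, float))
_is_str(Term("foo"))   # True
_is_num(Term("foo"))   # False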
173,249 | from __future__ import annotations
import ast
from functools import (
partial,
reduce,
)
from keyword import iskeyword
import tokenize
from typing import (
Callable,
TypeVar,
)
import numpy as np
from pandas.compat import PY39
from pandas.errors import UndefinedVariableError
import pandas.core.common as com
from pandas.core.computation.ops import (
ARITH_OPS_SYMS,
BOOL_OPS_SYMS,
CMP_OPS_SYMS,
LOCAL_TAG,
MATHOPS,
REDUCTIONS,
UNARY_OPS_SYMS,
BinOp,
Constant,
Div,
FuncNode,
Op,
Term,
UnaryOp,
is_term,
)
from pandas.core.computation.parsing import (
clean_backtick_quoted_toks,
tokenize_string,
)
from pandas.core.computation.scope import Scope
from pandas.io.formats import printing
_all_nodes = frozenset(
node
for node in (getattr(ast, name) for name in dir(ast))
if isinstance(node, type) and issubclass(node, ast.AST)
)
The provided code snippet includes necessary dependencies for implementing the `_filter_nodes` function. Write a Python function `def _filter_nodes(superclass, all_nodes=_all_nodes)` to solve the following problem:
Filter out AST nodes that are subclasses of ``superclass``.
Here is the function:
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""
Filter out AST nodes that are subclasses of ``superclass``.
"""
node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass))
return frozenset(node_names) | Filter out AST nodes that are subclasses of ``superclass``. |
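For instance, collecting the names of all expression-context AST nodes (the exact contents depend on the Python version):
import ast
expr_node_names = _filter_nodes(ast.expr)
"BinOp" in expr_node_names     # True: ast.BinOp subclasses ast.expr
"Module" in expr_node_names    # False: ast.Module is not an expression node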
173,250 | from __future__ import annotations
import ast
from functools import (
partial,
reduce,
)
from keyword import iskeyword
import tokenize
from typing import (
Callable,
TypeVar,
)
import numpy as np
from pandas.compat import PY39
from pandas.errors import UndefinedVariableError
import pandas.core.common as com
from pandas.core.computation.ops import (
ARITH_OPS_SYMS,
BOOL_OPS_SYMS,
CMP_OPS_SYMS,
LOCAL_TAG,
MATHOPS,
REDUCTIONS,
UNARY_OPS_SYMS,
BinOp,
Constant,
Div,
FuncNode,
Op,
Term,
UnaryOp,
is_term,
)
from pandas.core.computation.parsing import (
clean_backtick_quoted_toks,
tokenize_string,
)
from pandas.core.computation.scope import Scope
from pandas.io.formats import printing
def _node_not_implemented(node_name: str) -> Callable[..., None]:
"""
Return a function that raises a NotImplementedError with a passed node name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError(f"'{node_name}' nodes are not implemented")
return f
_T = TypeVar("_T")
class Callable(BaseTypingInstance):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
def py__get__(self, instance, class_value):
return ValueSet([self])
The provided code snippet includes necessary dependencies for implementing the `disallow` function. Write a Python function `def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]` to solve the following problem:
Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- callable
Here is the function:
def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]:
"""
Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
callable
"""
def disallowed(cls: type[_T]) -> type[_T]:
# error: "Type[_T]" has no attribute "unsupported_nodes"
cls.unsupported_nodes = () # type: ignore[attr-defined]
for node in nodes:
new_method = _node_not_implemented(node)
name = f"visit_{node}"
# error: "Type[_T]" has no attribute "unsupported_nodes"
cls.unsupported_nodes += (name,) # type: ignore[attr-defined]
setattr(cls, name, new_method)
return cls
return disallowed | Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- callable |
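A toy illustration of the decorator, assuming disallow and _node_not_implemented from the snippet above are in scope; ToyVisitor is invented for the example:
@disallow({"Yield", "GeneratorExp"})
class ToyVisitor:
    pass
ToyVisitor.unsupported_nodes      # ('visit_Yield', 'visit_GeneratorExp'), order depends on set iteration
ToyVisitor().visit_Yield(None)    # raises NotImplementedError: 'Yield' nodes are not implemented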
173,251 | from __future__ import annotations
import ast
from functools import (
partial,
reduce,
)
from keyword import iskeyword
import tokenize
from typing import (
Callable,
TypeVar,
)
import numpy as np
from pandas.compat import PY39
from pandas.errors import UndefinedVariableError
import pandas.core.common as com
from pandas.core.computation.ops import (
ARITH_OPS_SYMS,
BOOL_OPS_SYMS,
CMP_OPS_SYMS,
LOCAL_TAG,
MATHOPS,
REDUCTIONS,
UNARY_OPS_SYMS,
BinOp,
Constant,
Div,
FuncNode,
Op,
Term,
UnaryOp,
is_term,
)
from pandas.core.computation.parsing import (
clean_backtick_quoted_toks,
tokenize_string,
)
from pandas.core.computation.scope import Scope
from pandas.io.formats import printing
def _op_maker(op_class, op_symbol):
"""
Return a function to create an op class with its symbol already passed.
Returns
-------
callable
"""
def f(self, node, *args, **kwargs):
"""
Return a partial function with an Op subclass with an operator already passed.
Returns
-------
callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
The provided code snippet includes necessary dependencies for implementing the `add_ops` function. Write a Python function `def add_ops(op_classes)` to solve the following problem:
Decorator to add default implementation of ops.
Here is the function:
def add_ops(op_classes):
"""
Decorator to add default implementation of ops.
"""
def f(cls):
for op_attr_name, op_class in op_classes.items():
ops = getattr(cls, f"{op_attr_name}_ops")
ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map")
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, f"visit_{op_node}", made_op)
return cls
return f | Decorator to add default implementation of ops. |
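A toy sketch of the pattern (ToyOp and ToyVisitor are invented): for every key in the dict passed to add_ops, the decorated class must expose matching <key>_ops and <key>_op_nodes_map attributes:
class ToyOp:
    def __init__(self, symbol, lhs, rhs):
        self.symbol, self.lhs, self.rhs = symbol, lhs, rhs
@add_ops({"binary": ToyOp})
class ToyVisitor:
    binary_ops = ["+", "-"]
    binary_op_nodes_map = {"+": "Add", "-": "Sub"}
maker = ToyVisitor().visit_Add(node=None)   # a partial(ToyOp, "+") built by _op_maker
op = maker("x", "y")                        # ToyOp with symbol "+", lhs "x", rhs "y"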
173,252 | from __future__ import annotations
import datetime
import inspect
from io import StringIO
import itertools
import pprint
import struct
import sys
from typing import (
ChainMap,
TypeVar,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.errors import UndefinedVariableError
def _replacer(x) -> str:
"""
Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
The provided code snippet includes necessary dependencies for implementing the `_raw_hex_id` function. Write a Python function `def _raw_hex_id(obj) -> str` to solve the following problem:
Return the padded hexadecimal id of ``obj``.
Here is the function:
def _raw_hex_id(obj) -> str:
"""Return the padded hexadecimal id of ``obj``."""
# interpret as a pointer since that's what really what id returns
packed = struct.pack("@P", id(obj))
return "".join([_replacer(x) for x in packed]) | Return the padded hexadecimal id of ``obj``. |
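For example (the exact output depends on the object's address and the platform's pointer size):
_raw_hex_id(object())
# e.g. '0x800xd50x320x3c0x9b0x7f0x00x0' -- one hex chunk per byte of the packed pointer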
173,253 | from __future__ import annotations
import datetime
import inspect
from io import StringIO
import itertools
import pprint
import struct
import sys
from typing import (
ChainMap,
TypeVar,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.errors import UndefinedVariableError
class StringIO(TextIOWrapper):
def __init__(self, initial_value: Optional[str] = ..., newline: Optional[str] = ...) -> None: ...
# StringIO does not contain a "name" field. This workaround is necessary
# to allow StringIO sub-classes to add this field, as it is defined
# as a read-only property on IO[].
name: Any
def getvalue(self) -> str: ...
The provided code snippet includes necessary dependencies for implementing the `_get_pretty_string` function. Write a Python function `def _get_pretty_string(obj) -> str` to solve the following problem:
Return a prettier version of obj. Parameters ---------- obj : object Object to pretty print Returns ------- str Pretty print object repr
Here is the function:
def _get_pretty_string(obj) -> str:
"""
Return a prettier version of obj.
Parameters
----------
obj : object
Object to pretty print
Returns
-------
str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue() | Return a prettier version of obj. Parameters ---------- obj : object Object to pretty print Returns ------- str Pretty print object repr |
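For example, pprint sorts dictionary keys by default, so:
_get_pretty_string({"b": 2, "a": 1})   # "{'a': 1, 'b': 2}\n"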
173,254 | from __future__ import annotations
import abc
from typing import TYPE_CHECKING
from pandas.errors import NumExprClobberingError
from pandas.core.computation.align import (
align_terms,
reconstruct_object,
)
from pandas.core.computation.ops import (
MATHOPS,
REDUCTIONS,
)
from pandas.io.formats import printing
_ne_builtins = frozenset(MATHOPS + REDUCTIONS)
class NumExprClobberingError(NameError):
"""
Exception raised when trying to use a built-in numexpr name as a variable name.
``eval`` or ``query`` will throw the error if the engine is set
to 'numexpr'. 'numexpr' is the default engine value for these methods if the
numexpr package is installed.
Examples
--------
>>> df = pd.DataFrame({'abs': [1, 1, 1]})
>>> df.query("abs > 2") # doctest: +SKIP
... # NumExprClobberingError: Variables in expression "(abs) > (2)" overlap...
>>> sin, a = 1, 2
>>> pd.eval("sin + a", engine='numexpr') # doctest: +SKIP
... # NumExprClobberingError: Variables in expression "(sin) + (a)" overlap...
"""
class Expr:
"""
Object encapsulating an expression.
Parameters
----------
expr : str
engine : str, optional, default 'numexpr'
parser : str, optional, default 'pandas'
env : Scope, optional, default None
level : int, optional, default 2
"""
env: Scope
engine: str
parser: str
def __init__(
self,
expr,
engine: str = "numexpr",
parser: str = "pandas",
env: Scope | None = None,
level: int = 0,
) -> None:
self.expr = expr
self.env = env or Scope(level=level + 1)
self.engine = engine
self.parser = parser
self._visitor = PARSERS[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
def assigner(self):
return getattr(self._visitor, "assigner", None)
def __call__(self):
return self.terms(self.env)
def __repr__(self) -> str:
return printing.pprint_thing(self.terms)
def __len__(self) -> int:
return len(self.expr)
def parse(self):
"""
Parse an expression.
"""
return self._visitor.visit(self.expr)
def names(self):
"""
Get the names in an expression.
"""
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
The provided code snippet includes necessary dependencies for implementing the `_check_ne_builtin_clash` function. Write a Python function `def _check_ne_builtin_clash(expr: Expr) -> None` to solve the following problem:
Attempt to prevent foot-shooting in a helpful way. Parameters ---------- expr : Expr Terms can contain
Here is the function:
def _check_ne_builtin_clash(expr: Expr) -> None:
"""
Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
expr : Expr
Terms can contain
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ", ".join([repr(x) for x in overlap])
raise NumExprClobberingError(
f'Variables in expression "{expr}" overlap with builtins: ({s})'
) | Attempt to prevent foot-shooting in a helpful way. Parameters ---------- expr : Expr Terms can contain |
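The check surfaces through the public API much as the NumExprClobberingError docstring above shows; for instance, with numexpr installed:
import pandas as pd
sin, a = 1, 2
pd.eval("sin + a", engine="numexpr")
# NumExprClobberingError: Variables in expression "(sin) + (a)" overlap with builtins: ('sin')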
173,255 | from __future__ import annotations
from io import StringIO
from keyword import iskeyword
import token
import tokenize
from typing import (
Hashable,
Iterator,
)
def create_valid_python_identifier(name: str) -> str:
"""
Create valid Python identifiers from any string.
Check if name contains any special characters. If it contains any
special characters, the special characters will be replaced by
a special string and a prefix is added.
Raises
------
SyntaxError
If the returned name is not a Python valid identifier, raise an exception.
This can happen if there is a hashtag in the name, as the tokenizer will
        then terminate and not find the backtick.
But also for characters that fall out of the range of (U+0001..U+007F).
"""
if name.isidentifier() and not iskeyword(name):
return name
# Create a dict with the special characters and their replacement string.
# EXACT_TOKEN_TYPES contains these special characters
# token.tok_name contains a readable description of the replacement string.
special_characters_replacements = {
char: f"_{token.tok_name[tokval]}_"
for char, tokval in (tokenize.EXACT_TOKEN_TYPES.items())
}
special_characters_replacements.update(
{
" ": "_",
"?": "_QUESTIONMARK_",
"!": "_EXCLAMATIONMARK_",
"$": "_DOLLARSIGN_",
"€": "_EUROSIGN_",
"°": "_DEGREESIGN_",
# Including quotes works, but there are exceptions.
"'": "_SINGLEQUOTE_",
'"': "_DOUBLEQUOTE_",
# Currently not possible. Terminates parser and won't find backtick.
# "#": "_HASH_",
}
)
name = "".join([special_characters_replacements.get(char, char) for char in name])
name = f"BACKTICK_QUOTED_STRING_{name}"
if not name.isidentifier():
raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.")
return name
def tokenize_string(source: str) -> Iterator[tuple[int, str]]:
"""
Tokenize a Python source code string.
Parameters
----------
source : str
The Python source code string.
Returns
-------
tok_generator : Iterator[Tuple[int, str]]
        An iterator yielding all tokens with only toknum and tokval (Tuple[int, str]).
"""
line_reader = StringIO(source).readline
token_generator = tokenize.generate_tokens(line_reader)
# Loop over all tokens till a backtick (`) is found.
# Then, take all tokens till the next backtick to form a backtick quoted string
for toknum, tokval, start, _, _ in token_generator:
if tokval == "`":
try:
yield tokenize_backtick_quoted_string(
token_generator, source, string_start=start[1] + 1
)
except Exception as err:
raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err
else:
yield toknum, tokval
class Hashable(Protocol, metaclass=ABCMeta):
# TODO: This is special, in that a subclass of a hashable class may not be hashable
# (for example, list vs. object). It's not obvious how to represent this. This class
# is currently mostly useless for static checking.
def __hash__(self) -> int: ...
The provided code snippet includes necessary dependencies for implementing the `clean_column_name` function. Write a Python function `def clean_column_name(name: Hashable) -> Hashable` to solve the following problem:
Function to emulate the cleaning of a backtick quoted name. The purpose of this function is to see what happens to the name of an identifier if it goes through the process of being parsed as Python code inside a backtick quoted string and then being cleaned (removed of any special characters). Parameters ---------- name : hashable Name to be cleaned. Returns ------- name : hashable Returns the name after tokenizing and cleaning. Notes ----- For some cases, a name cannot be converted to a valid Python identifier. In that case :func:`tokenize_string` raises a SyntaxError. In that case, we just return the name unmodified. If this name was used in the query string (this makes the query call impossible) an error will be raised by :func:`tokenize_backtick_quoted_string` instead, which is not caught and propagates to the user level.
Here is the function:
def clean_column_name(name: Hashable) -> Hashable:
"""
Function to emulate the cleaning of a backtick quoted name.
    The purpose of this function is to see what happens to the name of an
    identifier if it goes through the process of being parsed as Python code
    inside a backtick quoted string and then being cleaned
(removed of any special characters).
Parameters
----------
name : hashable
Name to be cleaned.
Returns
-------
name : hashable
Returns the name after tokenizing and cleaning.
Notes
-----
For some cases, a name cannot be converted to a valid Python identifier.
In that case :func:`tokenize_string` raises a SyntaxError.
In that case, we just return the name unmodified.
If this name was used in the query string (this makes the query call impossible)
an error will be raised by :func:`tokenize_backtick_quoted_string` instead,
which is not caught and propagates to the user level.
"""
try:
tokenized = tokenize_string(f"`{name}`")
tokval = next(tokenized)[1]
return create_valid_python_identifier(tokval)
except SyntaxError:
        return name | Function to emulate the cleaning of a backtick quoted name. The purpose of this function is to see what happens to the name of an identifier if it goes through the process of being parsed as Python code inside a backtick quoted string and then being cleaned (removed of any special characters). Parameters ---------- name : hashable Name to be cleaned. Returns ------- name : hashable Returns the name after tokenizing and cleaning. Notes ----- For some cases, a name cannot be converted to a valid Python identifier. In that case :func:`tokenize_string` raises a SyntaxError. In that case, we just return the name unmodified. If this name was used in the query string (this makes the query call impossible) an error will be raised by :func:`tokenize_backtick_quoted_string` instead, which is not caught and propagates to the user level. |
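For instance, assuming the full pandas.core.computation.parsing module (the snippet above omits tokenize_backtick_quoted_string): a column name with a space survives the backtick round-trip as a mangled but valid identifier, while a name the tokenizer cannot handle comes back unchanged:
clean_column_name("total cost")    # 'BACKTICK_QUOTED_STRING_total_cost'
clean_column_name("with#hash")     # 'with#hash' -- the '#' stops the tokenizer, so the name is returned as-is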
173,256 | from __future__ import annotations
import operator
import warnings
import numpy as np
from pandas._config import get_option
from pandas._typing import FuncType
from pandas.util._exceptions import find_stack_level
from pandas.core.computation.check import NUMEXPR_INSTALLED
from pandas.core.ops import roperator
if NUMEXPR_INSTALLED:
import numexpr as ne
USE_NUMEXPR = NUMEXPR_INSTALLED
NUMEXPR_INSTALLED = ne is not None
if NUMEXPR_INSTALLED:
NUMEXPR_VERSION = ne.__version__
else:
NUMEXPR_VERSION = None
def set_numexpr_threads(n=None) -> None:
# if we are using numexpr, set the threads to n
# otherwise reset
if NUMEXPR_INSTALLED and USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n) | null |
173,257 | from __future__ import annotations
import operator
import warnings
import numpy as np
from pandas._config import get_option
from pandas._typing import FuncType
from pandas.util._exceptions import find_stack_level
from pandas.core.computation.check import NUMEXPR_INSTALLED
from pandas.core.ops import roperator
_TEST_MODE: bool | None = None
_TEST_RESULT: list[bool] = []
The provided code snippet includes necessary dependencies for implementing the `set_test_mode` function. Write a Python function `def set_test_mode(v: bool = True) -> None` to solve the following problem:
Keeps track of whether numexpr was used. Stores an additional ``True`` for every successful use of evaluate with numexpr since the last ``get_test_result``.
Here is the function:
def set_test_mode(v: bool = True) -> None:
"""
Keeps track of whether numexpr was used.
Stores an additional ``True`` for every successful use of evaluate with
numexpr since the last ``get_test_result``.
"""
global _TEST_MODE, _TEST_RESULT
_TEST_MODE = v
_TEST_RESULT = [] | Keeps track of whether numexpr was used. Stores an additional ``True`` for every successful use of evaluate with numexpr since the last ``get_test_result``. |
173,258 | from __future__ import annotations
import operator
import warnings
import numpy as np
from pandas._config import get_option
from pandas._typing import FuncType
from pandas.util._exceptions import find_stack_level
from pandas.core.computation.check import NUMEXPR_INSTALLED
from pandas.core.ops import roperator
_TEST_RESULT: list[bool] = []
The provided code snippet includes necessary dependencies for implementing the `get_test_result` function. Write a Python function `def get_test_result() -> list[bool]` to solve the following problem:
Get test result and reset test_results.
Here is the function:
def get_test_result() -> list[bool]:
"""
Get test result and reset test_results.
"""
global _TEST_RESULT
res = _TEST_RESULT
_TEST_RESULT = []
return res | Get test result and reset test_results. |
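A standalone sketch of the bookkeeping pattern the two helpers above implement; the names below are illustrative stand-ins for the private module state, not the pandas-internal API.
_test_results: list[bool] = []

def record_use(used_numexpr: bool) -> None:
    # One entry per evaluation, analogous to appending to _TEST_RESULT.
    _test_results.append(used_numexpr)

def drain_results() -> list[bool]:
    # Hand back what was collected and reset, like get_test_result above.
    global _test_results
    out, _test_results = _test_results, []
    return out

record_use(True)
record_use(False)
print(drain_results())   # [True, False]
print(drain_results())   # []  (the buffer was reset)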
173,259 | from __future__ import annotations
import ast
from functools import partial
from typing import Any
import numpy as np
from pandas._libs.tslibs import (
Timedelta,
Timestamp,
)
from pandas._typing import npt
from pandas.errors import UndefinedVariableError
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.core.computation import (
expr,
ops,
scope as _scope,
)
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import is_term
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
class PyTablesExpr(expr.Expr):
"""
Hold a pytables-like expression, comprised of possibly multiple 'terms'.
Parameters
----------
where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
a PyTablesExpr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
_visitor: PyTablesExprVisitor | None
env: PyTablesScope
expr: str
def __init__(
self,
where,
queryables: dict[str, Any] | None = None,
encoding=None,
scope_level: int = 0,
) -> None:
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict: _scope.DeepChainMap[Any, Any] | None = None
if isinstance(where, PyTablesExpr):
local_dict = where.env.scope
_where = where.expr
elif is_list_like(where):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
_where = " & ".join([f"({w})" for w in com.flatten(where)])
else:
# _validate_where ensures we otherwise have a string
_where = where
self.expr = _where
self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = PyTablesExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __repr__(self) -> str:
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
"""create and return the numexpr condition and filter"""
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid condition"
) from err
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid filter"
) from err
return self.condition, self.filter
The provided code snippet includes necessary dependencies for implementing the `_validate_where` function. Write a Python function `def _validate_where(w)` to solve the following problem:
Validate that the where statement is of the right type. The type may either be String, Expr, or list-like of Exprs. Parameters ---------- w : String term expression, Expr, or list-like of Exprs. Returns ------- where : The original where clause if the check was successful. Raises ------ TypeError : An invalid data type was passed in for w (e.g. dict).
Here is the function:
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w | Validate that the where statement is of the right type. The type may either be String, Expr, or list-like of Exprs. Parameters ---------- w : String term expression, Expr, or list-like of Exprs. Returns ------- where : The original where clause if the check was successful. Raises ------ TypeError : An invalid data type was passed in for w (e.g. dict). |
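A hedged usage sketch against pandas internals (the import path is assumed from the module shown above): strings and list-likes pass through unchanged, while unsupported types raise TypeError.
from pandas.core.computation.pytables import _validate_where

_validate_where("index > 5")                        # returned unchanged
_validate_where(["index > 5", "columns == 'A'"])    # list-likes are accepted
try:
    _validate_where(123)                            # not a string, Expr, or list-like
except TypeError as err:
    print(err)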
173,260 | from __future__ import annotations
from datetime import datetime
from functools import partial
import operator
from typing import (
Callable,
Iterable,
Iterator,
Literal,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import (
is_list_like,
is_scalar,
)
import pandas.core.common as com
from pandas.core.computation.common import (
ensure_decoded,
result_type_many,
)
from pandas.core.computation.scope import DEFAULT_GLOBALS
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
The provided code snippet includes necessary dependencies for implementing the `_in` function. Write a Python function `def _in(x, y)` to solve the following problem:
Compute the vectorized membership of ``x in y`` if possible, otherwise use Python.
Here is the function:
def _in(x, y):
"""
Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y | Compute the vectorized membership of ``x in y`` if possible, otherwise use Python. |
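A short illustration of the two paths `_in` chooses between, using only public pandas objects:
import pandas as pd

s = pd.Series([1, 2, 3])
print(s.isin([2, 3]).tolist())   # vectorized path: [False, True, True]
print(2 in [1, 2, 3])            # plain-Python fallback when .isin is unavailable: True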
173,261 | from __future__ import annotations
from datetime import datetime
from functools import partial
import operator
from typing import (
Callable,
Iterable,
Iterator,
Literal,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import (
is_list_like,
is_scalar,
)
import pandas.core.common as com
from pandas.core.computation.common import (
ensure_decoded,
result_type_many,
)
from pandas.core.computation.scope import DEFAULT_GLOBALS
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
The provided code snippet includes necessary dependencies for implementing the `_not_in` function. Write a Python function `def _not_in(x, y)` to solve the following problem:
Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python.
Here is the function:
def _not_in(x, y):
"""
Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y | Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python. |
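The mirror-image illustration for `_not_in`:
import pandas as pd

s = pd.Series(["a", "b", "c"])
print((~s.isin(["a"])).tolist())   # [False, True, True]
print("d" not in ["a", "b"])       # scalar fallback: True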
173,262 | from __future__ import annotations
from datetime import datetime
from functools import partial
import operator
from typing import (
Callable,
Iterable,
Iterator,
Literal,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import (
is_list_like,
is_scalar,
)
import pandas.core.common as com
from pandas.core.computation.common import (
ensure_decoded,
result_type_many,
)
from pandas.core.computation.scope import DEFAULT_GLOBALS
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
The provided code snippet includes necessary dependencies for implementing the `_cast_inplace` function. Write a Python function `def _cast_inplace(terms, acceptable_dtypes, dtype) -> None` to solve the following problem:
Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. dtype : str or numpy.dtype The dtype to cast to.
Here is the function:
def _cast_inplace(terms, acceptable_dtypes, dtype) -> None:
"""
Cast an expression inplace.
Parameters
----------
terms : Op
        The expression that should be cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value) | Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. dtype : str or numpy.dtype The dtype to cast to. |
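A standalone sketch of the cast-in-place loop with a toy stand-in for Term (the real Term lives in pandas.core.computation.ops); it shows the astype-versus-scalar-cast fallback.
import numpy as np

class ToyTerm:
    def __init__(self, value):
        self.value = value
    @property
    def type(self):
        return getattr(self.value, "dtype", type(self.value))
    def update(self, new_value):
        self.value = new_value

dt = np.dtype("float64")
terms = [ToyTerm(np.array([1, 2, 3])), ToyTerm(7)]
for term in terms:
    if term.type == dt:
        continue
    try:
        new_value = term.value.astype(dt)   # array-likes: vectorized cast
    except AttributeError:
        new_value = dt.type(term.value)     # plain scalars: cast via the dtype's scalar type
    term.update(new_value)

print([t.value for t in terms])             # [array([1., 2., 3.]), 7.0]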
173,263 | from __future__ import annotations
from datetime import datetime
from functools import partial
import operator
from typing import (
Callable,
Iterable,
Iterator,
Literal,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import (
is_list_like,
is_scalar,
)
import pandas.core.common as com
from pandas.core.computation.common import (
ensure_decoded,
result_type_many,
)
from pandas.core.computation.scope import DEFAULT_GLOBALS
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
class Term:
    def __new__(cls, name, env, side=None, encoding=None): ...
    def __init__(self, name, env, side=None, encoding=None) -> None: ...
    def local_name(self) -> str: ...
    def __repr__(self) -> str: ...
    def __call__(self, *args, **kwargs): ...
    def evaluate(self, *args, **kwargs) -> Term: ...
    def _resolve_name(self): ...
    def update(self, value) -> None: ...
    def is_scalar(self) -> bool: ...
    def type(self): ...
    def raw(self) -> str: ...
    def is_datetime(self) -> bool: ...
    def value(self): ...
    def value(self, new_value) -> None: ...
    def name(self): ...
    def ndim(self) -> int: ...
def is_term(obj) -> bool:
return isinstance(obj, Term) | null |
173,264 | from __future__ import annotations
from datetime import datetime
from functools import partial
import operator
from typing import (
Callable,
Iterable,
Iterator,
Literal,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import (
is_list_like,
is_scalar,
)
import pandas.core.common as com
from pandas.core.computation.common import (
ensure_decoded,
result_type_many,
)
from pandas.core.computation.scope import DEFAULT_GLOBALS
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
def isnumeric(dtype) -> bool:
return issubclass(np.dtype(dtype).type, np.number) | null |
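A quick illustration of the subclass check `isnumeric` relies on:
import numpy as np

print(issubclass(np.dtype("float64").type, np.number))          # True
print(issubclass(np.dtype("int32").type, np.number))            # True
print(issubclass(np.dtype("object").type, np.number))           # False
print(issubclass(np.dtype("datetime64[ns]").type, np.number))   # False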
173,265 | from __future__ import annotations
from functools import reduce
import numpy as np
from pandas._config import get_option
The provided code snippet includes necessary dependencies for implementing the `ensure_decoded` function. Write a Python function `def ensure_decoded(s) -> str` to solve the following problem:
If we have bytes, decode them to unicode.
Here is the function:
def ensure_decoded(s) -> str:
"""
If we have bytes, decode them to unicode.
"""
if isinstance(s, (np.bytes_, bytes)):
s = s.decode(get_option("display.encoding"))
return s | If we have bytes, decode them to unicode. |
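A hedged standalone sketch of the decode-if-bytes behaviour, with the encoding hard-coded instead of read from pandas' "display.encoding" option:
def sketch_ensure_decoded(s, encoding="utf-8"):
    if isinstance(s, bytes):
        return s.decode(encoding)
    return s

print(sketch_ensure_decoded(b"price"))   # 'price'
print(sketch_ensure_decoded("price"))    # already str, returned as-is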
173,266 | from __future__ import annotations
import tokenize
from typing import TYPE_CHECKING
import warnings
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.computation.engines import ENGINES
from pandas.core.computation.expr import (
PARSERS,
Expr,
)
from pandas.core.computation.parsing import tokenize_string
from pandas.core.computation.scope import ensure_scope
from pandas.core.generic import NDFrame
from pandas.io.formats.printing import pprint_thing
def _check_engine(engine: str | None) -> str:
"""
Make sure a valid engine is passed.
Parameters
----------
engine : str
String to validate.
Raises
------
KeyError
* If an invalid engine is passed.
ImportError
* If numexpr was requested but doesn't exist.
Returns
-------
str
Engine name.
"""
from pandas.core.computation.check import NUMEXPR_INSTALLED
from pandas.core.computation.expressions import USE_NUMEXPR
if engine is None:
engine = "numexpr" if USE_NUMEXPR else "python"
if engine not in ENGINES:
valid_engines = list(ENGINES.keys())
raise KeyError(
f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
)
# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == "numexpr" and not NUMEXPR_INSTALLED:
raise ImportError(
"'numexpr' is not installed or an unsupported version. Cannot use "
"engine='numexpr' for query/eval if 'numexpr' is not installed"
)
return engine
def _check_parser(parser: str):
"""
Make sure a valid parser is passed.
Parameters
----------
parser : str
Raises
------
KeyError
* If an invalid parser is passed
"""
if parser not in PARSERS:
raise KeyError(
f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"
)
def _check_resolvers(resolvers):
if resolvers is not None:
for resolver in resolvers:
if not hasattr(resolver, "__getitem__"):
name = type(resolver).__name__
raise TypeError(
f"Resolver of type '{name}' does not "
"implement the __getitem__ method"
)
def _check_expression(expr):
"""
Make sure an expression is not an empty string
Parameters
----------
expr : object
An object that can be converted to a string
Raises
------
ValueError
* If expr is an empty string
"""
if not expr:
raise ValueError("expr cannot be an empty string")
def _convert_expression(expr) -> str:
"""
Convert an object to an expression.
This function converts an object to an expression (a unicode string) and
checks to make sure it isn't empty after conversion. This is used to
convert operators to their string representation for recursive calls to
:func:`~pandas.eval`.
Parameters
----------
expr : object
The object to be converted to a string.
Returns
-------
str
The string representation of an object.
Raises
------
ValueError
* If the expression is empty.
"""
s = pprint_thing(expr)
_check_expression(s)
return s
def _check_for_locals(expr: str, stack_level: int, parser: str):
at_top_of_stack = stack_level == 0
not_pandas_parser = parser != "pandas"
if not_pandas_parser:
msg = "The '@' prefix is only supported by the pandas parser"
elif at_top_of_stack:
msg = (
"The '@' prefix is not allowed in top-level eval calls.\n"
"please refer to your variables by name without the '@' prefix."
)
if at_top_of_stack or not_pandas_parser:
for toknum, tokval in tokenize_string(expr):
if toknum == tokenize.OP and tokval == "@":
raise SyntaxError(msg)
def find_stack_level() -> int:
"""
Find the first place in the stack that is not inside pandas
(tests notwithstanding).
"""
    import inspect
    import os

    import pandas as pd
pkg_dir = os.path.dirname(pd.__file__)
test_dir = os.path.join(pkg_dir, "tests")
# https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
frame = inspect.currentframe()
n = 0
while frame:
fname = inspect.getfile(frame)
if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
frame = frame.f_back
n += 1
else:
break
return n
def validate_bool_kwarg(
value: BoolishNoneT, arg_name, none_allowed: bool = True, int_allowed: bool = False
) -> BoolishNoneT:
"""
Ensure that argument passed in arg_name can be interpreted as boolean.
Parameters
----------
value : bool
Value to be validated.
arg_name : str
Name of the argument. To be reflected in the error message.
none_allowed : bool, default True
Whether to consider None to be a valid boolean.
int_allowed : bool, default False
Whether to consider integer value to be a valid boolean.
Returns
-------
value
The same value as input.
Raises
------
ValueError
If the value is not a valid boolean.
"""
good_value = is_bool(value)
if none_allowed:
good_value = good_value or value is None
if int_allowed:
good_value = good_value or isinstance(value, int)
if not good_value:
raise ValueError(
f'For argument "{arg_name}" expected type bool, received '
f"type {type(value).__name__}."
)
return value
def is_extension_array_dtype(arr_or_dtype) -> bool:
"""
Check if an object is a pandas extension array type.
    See the :ref:`User Guide <extending.extension-types>` for more.
Parameters
----------
arr_or_dtype : object
For array-like input, the ``.dtype`` attribute will
be extracted.
Returns
-------
bool
Whether the `arr_or_dtype` is an extension array type.
Notes
-----
This checks whether an object implements the pandas extension
array interface. In pandas, this includes:
* Categorical
* Sparse
* Interval
* Period
* DatetimeArray
* TimedeltaArray
Third-party libraries may implement arrays or types satisfying
this interface as well.
Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
>>> arr = pd.Categorical(['a', 'b'])
>>> is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
True
>>> arr = np.array(['a', 'b'])
>>> is_extension_array_dtype(arr.dtype)
False
"""
dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
if isinstance(dtype, ExtensionDtype):
return True
elif isinstance(dtype, np.dtype):
return False
else:
return registry.find(dtype) is not None
ENGINES: dict[str, type[AbstractEngine]] = {
"numexpr": NumExprEngine,
"python": PythonEngine,
}
class Expr:
"""
Object encapsulating an expression.
Parameters
----------
expr : str
engine : str, optional, default 'numexpr'
parser : str, optional, default 'pandas'
env : Scope, optional, default None
level : int, optional, default 2
"""
env: Scope
engine: str
parser: str
def __init__(
self,
expr,
engine: str = "numexpr",
parser: str = "pandas",
env: Scope | None = None,
level: int = 0,
) -> None:
self.expr = expr
self.env = env or Scope(level=level + 1)
self.engine = engine
self.parser = parser
self._visitor = PARSERS[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
def assigner(self):
return getattr(self._visitor, "assigner", None)
def __call__(self):
return self.terms(self.env)
def __repr__(self) -> str:
return printing.pprint_thing(self.terms)
def __len__(self) -> int:
return len(self.expr)
def parse(self):
"""
Parse an expression.
"""
return self._visitor.visit(self.expr)
def names(self):
"""
Get the names in an expression.
"""
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
def ensure_scope(
level: int, global_dict=None, local_dict=None, resolvers=(), target=None
) -> Scope:
"""Ensure that we are grabbing the correct scope."""
return Scope(
level + 1,
global_dict=global_dict,
local_dict=local_dict,
resolvers=resolvers,
target=target,
)
class NDFrame(PandasObject, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: list[str] = [
"_mgr",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
"_flags",
]
_internal_names_set: set[str] = set(_internal_names)
_accessors: set[str] = set()
_hidden_attrs: frozenset[str] = frozenset([])
_metadata: list[str] = []
_is_copy: weakref.ReferenceType[NDFrame] | None = None
_mgr: Manager
_attrs: dict[Hashable, Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: Manager,
copy: bool_t = False,
attrs: Mapping[Hashable, Any] | None = None,
) -> None:
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_mgr", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))
def _init_mgr(
cls,
mgr: Manager,
axes,
dtype: Dtype | None = None,
copy: bool_t = False,
) -> Manager:
"""passed a manager and a axes dict"""
for a, axe in axes.items():
if axe is not None:
axe = ensure_index(axe)
bm_axis = cls._get_block_manager_axis(a)
mgr = mgr.reindex_axis(axe, axis=bm_axis)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if (
isinstance(mgr, BlockManager)
and len(mgr.blocks) == 1
and is_dtype_equal(mgr.blocks[0].values.dtype, dtype)
):
pass
else:
mgr = mgr.astype(dtype=dtype)
return mgr
def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT:
"""
Private helper function to create a DataFrame with specific manager.
Parameters
----------
typ : {"block", "array"}
copy : bool, default True
Only controls whether the conversion from Block->ArrayManager
copies the 1D arrays (to ensure proper/contiguous memory layout).
Returns
-------
DataFrame
New DataFrame using specified manager type. Is not guaranteed
to be a copy or not.
"""
new_mgr: Manager
new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy)
# fastpath of passing a manager doesn't check the option/manager class
return self._constructor(new_mgr).__finalize__(self)
# ----------------------------------------------------------------------
# attrs and flags
def attrs(self) -> dict[Hashable, Any]:
"""
Dictionary of global attributes of this dataset.
.. warning::
attrs is experimental and may change without warning.
See Also
--------
DataFrame.flags : Global flags applying to this object.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
def flags(self) -> Flags:
"""
Get the properties associated with this pandas object.
The available flags are
* :attr:`Flags.allows_duplicate_labels`
See Also
--------
Flags : Flags that apply to pandas objects.
DataFrame.attrs : Global metadata applying to this dataset.
Notes
-----
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags
<Flags(allows_duplicate_labels=True)>
        Flags can be retrieved or set using the ``.`` accessor
>>> df.flags.allows_duplicate_labels
True
>>> df.flags.allows_duplicate_labels = False
Or by slicing with a key
>>> df.flags["allows_duplicate_labels"]
False
>>> df.flags["allows_duplicate_labels"] = True
"""
return self._flags
def set_flags(
self: NDFrameT,
*,
copy: bool_t = False,
allows_duplicate_labels: bool_t | None = None,
) -> NDFrameT:
"""
Return a new object with updated flags.
Parameters
----------
copy : bool, default False
Specify if a copy of the object should be made.
allows_duplicate_labels : bool, optional
Whether the returned object allows duplicate labels.
Returns
-------
Series or DataFrame
The same type as the caller.
See Also
--------
DataFrame.attrs : Global metadata applying to this dataset.
DataFrame.flags : Global flags applying to this object.
Notes
-----
This method returns a new object that's a view on the same data
as the input. Mutating the input or the output values will be reflected
in the other.
This method is intended to be used in method chains.
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags.allows_duplicate_labels
True
>>> df2 = df.set_flags(allows_duplicate_labels=False)
>>> df2.flags.allows_duplicate_labels
False
"""
df = self.copy(deep=copy and not using_copy_on_write())
if allows_duplicate_labels is not None:
df.flags["allows_duplicate_labels"] = allows_duplicate_labels
return df
def _validate_dtype(cls, dtype) -> DtypeObj | None:
"""validate the passed dtype"""
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
# ----------------------------------------------------------------------
# Internals
def _data(self):
# GH#33054 retained because some downstream packages uses this,
# e.g. fastparquet
return self._mgr
# ----------------------------------------------------------------------
# Axis
_stat_axis_number = 0
_stat_axis_name = "index"
_AXIS_ORDERS: list[Literal["index", "columns"]]
_AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0}
_info_axis_number: int
_info_axis_name: Literal["index", "columns"]
_AXIS_LEN: int
def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
# error: Argument 1 to "update" of "MutableMapping" has incompatible type
# "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]"
d.update(kwargs) # type: ignore[arg-type]
return d
def _get_axis_number(cls, axis: Axis) -> AxisInt:
try:
return cls._AXIS_TO_AXIS_NUMBER[axis]
except KeyError:
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]:
axis_number = cls._get_axis_number(axis)
return cls._AXIS_ORDERS[axis_number]
def _get_axis(self, axis: Axis) -> Index:
axis_number = self._get_axis_number(axis)
assert axis_number in {0, 1}
return self.index if axis_number == 0 else self.columns
def _get_block_manager_axis(cls, axis: Axis) -> AxisInt:
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
ndim = cls._AXIS_LEN
if ndim == 2:
# i.e. DataFrame
return 1 - axis
return axis
def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]:
# index or columns
axis_index = getattr(self, axis)
d = {}
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]:
from pandas.core.computation.parsing import clean_column_name
d: dict[str, Series | MultiIndex] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
def _info_axis(self) -> Index:
return getattr(self, self._info_axis_name)
def _stat_axis(self) -> Index:
return getattr(self, self._stat_axis_name)
def shape(self) -> tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
def axes(self) -> list[Index]:
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._mgr.ndim
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
# error: Incompatible return value type (got "signedinteger[_64Bit]",
# expected "int") [return-value]
return np.prod(self.shape) # type: ignore[return-value]
def set_axis(
self: NDFrameT,
labels,
*,
axis: Axis = 0,
copy: bool_t | None = None,
) -> NDFrameT:
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows. For `Series`
this parameter is unused and defaults to 0.
copy : bool, default True
Whether to make a copy of the underlying data.
.. versionadded:: 1.5.0
Returns
-------
%(klass)s
An object of type %(klass)s.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy)
def _set_axis_nocheck(
self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None
):
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
# With copy=False, we create a new object but don't copy the
# underlying data.
obj = self.copy(deep=copy and not using_copy_on_write())
setattr(obj, obj._get_axis_name(axis), labels)
return obj
def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None:
"""
This is called from the cython code when we set the `index` attribute
directly, e.g. `series.index = [1, 2, 3]`.
"""
labels = ensure_index(labels)
self._mgr.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(
self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None
) -> NDFrameT:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
return self.copy(deep=copy and not using_copy_on_write())
mapping = {i: j, j: i}
new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)]
new_values = self._values.swapaxes(i, j) # type: ignore[union-attr]
if (
using_copy_on_write()
and self._mgr.is_single_block
and isinstance(self._mgr, BlockManager)
):
# This should only get hit in case of having a single block, otherwise a
# copy is made, we don't have to set up references.
new_mgr = ndarray_to_mgr(
new_values,
new_axes[0],
new_axes[1],
dtype=None,
copy=False,
typ="block",
)
assert isinstance(new_mgr, BlockManager)
assert isinstance(self._mgr, BlockManager)
new_mgr.blocks[0].refs = self._mgr.blocks[0].refs
new_mgr.blocks[0].refs.add_reference(
new_mgr.blocks[0] # type: ignore[arg-type]
)
return self._constructor(new_mgr).__finalize__(self, method="swapaxes")
elif (copy or copy is None) and self._mgr.is_single_block:
new_values = new_values.copy()
return self._constructor(
new_values,
*new_axes,
# The no-copy case for CoW is handled above
copy=False,
).__finalize__(self, method="swapaxes")
def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT:
"""
Return {klass} with requested index / column level(s) removed.
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
Axis along which the level(s) is removed:
* 0 or 'index': remove level(s) in column.
* 1 or 'columns': remove level(s) in row.
For `Series` this parameter is unused and defaults to 0.
Returns
-------
{klass}
{klass} with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
return self.set_axis(new_labels, axis=axis, copy=None)
def pop(self, item: Hashable) -> Series | Any:
result = self[item]
del self[item]
return result
def squeeze(self, axis: Axis | None = None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed. For `Series` this parameter is unused and defaults to `None`.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axes and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def _rename(
self: NDFrameT,
mapper: Renamer | None = None,
*,
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
copy: bool_t | None = None,
inplace: bool_t = False,
level: Level | None = None,
errors: str = "ignore",
) -> NDFrameT | None:
# called by Series.rename and DataFrame.rename
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
if mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
self._check_inplace_and_allows_duplicate_labels(inplace)
result = self if inplace else self.copy(deep=copy and not using_copy_on_write())
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
f = common.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
if ax._is_multi and level is not None:
indexer = ax.get_level_values(level).get_indexer_for(replacements)
else:
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
new_index = ax._transform_index(f, level=level)
result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False)
result._clear_item_cache()
if inplace:
self._update_inplace(result)
return None
else:
return result.__finalize__(self, method="rename")
def rename_axis(
self: NDFrameT,
mapper: IndexLabel | lib.NoDefault = ...,
*,
index=...,
columns=...,
axis: Axis = ...,
copy: bool_t | None = ...,
inplace: Literal[False] = ...,
) -> NDFrameT:
...
def rename_axis(
self,
mapper: IndexLabel | lib.NoDefault = ...,
*,
index=...,
columns=...,
axis: Axis = ...,
copy: bool_t | None = ...,
inplace: Literal[True],
) -> None:
...
def rename_axis(
self: NDFrameT,
mapper: IndexLabel | lib.NoDefault = ...,
*,
index=...,
columns=...,
axis: Axis = ...,
copy: bool_t | None = ...,
inplace: bool_t = ...,
) -> NDFrameT | None:
...
def rename_axis(
self: NDFrameT,
mapper: IndexLabel | lib.NoDefault = lib.no_default,
*,
index=lib.no_default,
columns=lib.no_default,
axis: Axis = 0,
copy: bool_t | None = None,
inplace: bool_t = False,
) -> NDFrameT | None:
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Note that the ``columns`` parameter is not allowed if the
object is a Series. This parameter only apply for DataFrame
type objects.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename. For `Series` this parameter is unused and defaults to 0.
copy : bool, default None
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes = {"index": index, "columns": columns}
if axis is not None:
axis = self._get_axis_number(axis)
inplace = validate_bool_kwarg(inplace, "inplace")
if copy and using_copy_on_write():
copy = False
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(
mapper, axis=axis, inplace=inplace, copy=copy
)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._get_axis_name(axis))
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = common.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy)
if not inplace:
return result
return None
def _set_axis_name(
self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True
):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
copy:
Whether to make a copy of the result.
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy(deep=copy)
if axis == 0:
renamed.index = idx
else:
renamed.columns = idx
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool_t:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other: object) -> bool_t:
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal.
        The row/column index does not need to have the same type, as long
as the values are considered equal. Corresponding columns must be of
the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not (isinstance(other, type(self)) or isinstance(self, type(other))):
return False
other = cast(NDFrame, other)
return self._mgr.equals(other._mgr)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self: NDFrameT) -> NDFrameT:
def blk_func(values: ArrayLike):
if is_bool_dtype(values.dtype):
# error: Argument 1 to "inv" has incompatible type "Union
# [ExtensionArray, ndarray[Any, Any]]"; expected
# "_SupportsInversion[ndarray[Any, dtype[bool_]]]"
return operator.inv(values) # type: ignore[arg-type]
else:
# error: Argument 1 to "neg" has incompatible type "Union
# [ExtensionArray, ndarray[Any, Any]]"; expected
# "_SupportsNeg[ndarray[Any, dtype[Any]]]"
return operator.neg(values) # type: ignore[arg-type]
new_data = self._mgr.apply(blk_func)
res = self._constructor(new_data)
return res.__finalize__(self, method="__neg__")
def __pos__(self: NDFrameT) -> NDFrameT:
def blk_func(values: ArrayLike):
if is_bool_dtype(values.dtype):
return values.copy()
else:
# error: Argument 1 to "pos" has incompatible type "Union
# [ExtensionArray, ndarray[Any, Any]]"; expected
# "_SupportsPos[ndarray[Any, dtype[Any]]]"
return operator.pos(values) # type: ignore[arg-type]
new_data = self._mgr.apply(blk_func)
res = self._constructor(new_data)
return res.__finalize__(self, method="__pos__")
def __invert__(self: NDFrameT) -> NDFrameT:
if not self.size:
# inv fails with 0 len
return self.copy(deep=False)
new_data = self._mgr.apply(operator.invert)
return self._constructor(new_data).__finalize__(self, method="__invert__")
def __nonzero__(self) -> NoReturn:
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self) -> bool_t:
"""
Return the bool of a single element Series or DataFrame.
This must be a boolean scalar value, either True or False. It will raise a
ValueError if the Series or DataFrame does not have exactly 1 element, or that
element is not boolean (integer values 0 and 1 will also raise an exception).
Returns
-------
bool
The value in the Series or DataFrame.
See Also
--------
Series.astype : Change the data type of a Series, including to boolean.
DataFrame.astype : Change the data type of a DataFrame, including to boolean.
numpy.bool_ : NumPy boolean data type, used by pandas for boolean values.
Examples
--------
The method will only work for single element objects with a boolean value:
>>> pd.Series([True]).bool()
True
>>> pd.Series([False]).bool()
False
>>> pd.DataFrame({'col': [True]}).bool()
True
>>> pd.DataFrame({'col': [False]}).bool()
False
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
# for mypy (__nonzero__ raises)
return True
def abs(self: NDFrameT) -> NDFrameT:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
res_mgr = self._mgr.apply(np.abs)
return self._constructor(res_mgr).__finalize__(self, name="abs")
def __abs__(self: NDFrameT) -> NDFrameT:
return self.abs()
def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT:
return self.round(decimals).__finalize__(self, method="__round__")
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t:
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : Hashable
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis_int = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis_int].names
and not self._is_label_reference(key, axis=axis_int)
)
def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key : Hashable
Potential label name, i.e. Index entry.
axis : int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis_int = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key : Hashable
Potential label or level name
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key : Hashable
Label or level name.
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis_int = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis_int].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis_int == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis_int == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key : Hashable
Label or level name.
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
np.ndarray or ExtensionArray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
f"The {label_axis_name} label '{key}' is not unique.{multi_message}"
)
return values
def _drop_labels_or_levels(self, keys, axis: AxisInt = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys : str or list of str
labels or levels to drop
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = common.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy(deep=False)
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
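    # Minimal sketch (hypothetical data, not a doctest): with axis=0, keys
    # matching index levels are reset away while keys matching column labels
    # drop the corresponding columns.
    #
    #   df = pd.DataFrame({"col": [1, 2]},
    #                     index=pd.Index(["x", "y"], name="lev"))
    #   df._drop_labels_or_levels(["col"])          # "col" column removed
    #   df._drop_labels_or_levels(["lev"])          # index level replaced by RangeIndex
    #   df._drop_labels_or_levels(["col", "lev"])   # both dropped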
# ----------------------------------------------------------------------
# Iteration
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
__hash__: ClassVar[None] # type: ignore[assignment]
def __iter__(self) -> Iterator:
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
    def keys(self) -> Index:
        """
        Get the 'info axis' (see Indexing for more).
        This is the index for Series and the columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""
        Iterate over (label, values) pairs on the info axis.
        This is the index for Series and the columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
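    # Minimal sketch (hypothetical data, not a doctest): for a DataFrame the
    # pairs are (column label, column Series); for a Series they are
    # (index label, scalar value).
    #
    #   df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    #   for label, values in df.items():
    #       print(label, list(values))    # "a [1, 2]" then "b [3, 4]"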
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
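    # Minimal sketch (hypothetical data, not a doctest): membership is tested
    # against the info axis only, never against the values.
    #
    #   df = pd.DataFrame({"a": [1]})
    #   "a" in df    # True  -- "a" is a column label
    #   1 in df      # False -- values are not searched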
    @property
    def empty(self) -> bool_t:
"""
Indicator whether Series/DataFrame is empty.
True if Series/DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
        If Series/DataFrame is empty, return True, otherwise return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If Series/DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
>>> ser_empty = pd.Series({'A' : []})
>>> ser_empty
A []
dtype: object
>>> ser_empty.empty
False
>>> ser_empty = pd.Series()
>>> ser_empty.empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__: int = 1000
def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
values = self._values
arr = np.asarray(values, dtype=dtype)
if (
astype_is_view(values.dtype, arr.dtype)
and using_copy_on_write()
and self._mgr.is_single_block
):
# Check if both conversions can be done without a copy
if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view(
values.dtype, arr.dtype
):
arr = arr.view()
arr.flags.writeable = False
return arr
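    # Minimal sketch (hypothetical data, not a doctest): np.asarray dispatches
    # to __array__; with copy-on-write enabled and a single-block frame, the
    # result may be a read-only view of the underlying data instead of a copy.
    #
    #   df = pd.DataFrame({"a": [1.0, 2.0]})
    #   arr = np.asarray(df)
    #   arr.shape              # (2, 1)
    #   arr.flags.writeable    # may be False under copy-on-write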
def __array_ufunc__(
self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any
):
return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return {
"_mgr": self._mgr,
"_typ": self._typ,
"_metadata": self._metadata,
"attrs": self.attrs,
"_flags": {k: self.flags[k] for k in self.flags._keys},
**meta,
}
def __setstate__(self, state) -> None:
if isinstance(state, BlockManager):
self._mgr = state
elif isinstance(state, dict):
if "_data" in state and "_mgr" not in state:
# compat for older pickles
state["_mgr"] = state.pop("_data")
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
flags = state.get("_flags", {"allows_duplicate_labels": True})
object.__setattr__(self, "_flags", Flags(self, **flags))
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _mgr to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state and k != "_flags":
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache: dict[Hashable, Series] = {}
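    # Minimal sketch (hypothetical data, not a doctest): __getstate__ and
    # __setstate__ are the hooks behind pickling, so a frame survives a
    # round trip through the standard pickle module.
    #
    #   import pickle
    #   df = pd.DataFrame({"a": [1, 2]})
    #   restored = pickle.loads(pickle.dumps(df))
    #   restored.equals(df)    # True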
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("styler.render.repr") == "latex":
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
as_json = data.to_json(orient="table")
as_json = cast(str, as_json)
return loads(as_json, object_pairs_hook=collections.OrderedDict)
# ----------------------------------------------------------------------
# I/O Methods
klass="object",
storage_options=_shared_docs["storage_options"],
storage_options_versionadded="1.2.0",
)
def to_excel(
self,
excel_writer,
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: str | None = None,
columns: Sequence[Hashable] | None = None,
header: Sequence[Hashable] | bool_t = True,
index: bool_t = True,
index_label: IndexLabel = None,
startrow: int = 0,
startcol: int = 0,
engine: str | None = None,
merge_cells: bool_t = True,
inf_rep: str = "inf",
freeze_panes: tuple[int, int] | None = None,
storage_options: StorageOptions = None,
) -> None:
"""
Write {klass} to an Excel sheet.
To write a single {klass} to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : path-like, file-like, or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer`` or
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
{storage_options}
.. versionadded:: {storage_options_versionadded}
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
io.formats.style.Styler.to_excel : Add styles to Excel sheet.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible to write further
data without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
storage_options=storage_options,
)
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "path_or_buf",
)
def to_json(
self,
path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
orient: str | None = None,
date_format: str | None = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Callable[[Any], JSONSerializable] | None = None,
lines: bool_t = False,
compression: CompressionOptions = "infer",
index: bool_t = True,
indent: int | None = None,
storage_options: StorageOptions = None,
mode: Literal["a", "w"] = "w",
) -> str | None:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str, path object, file-like object, or None, default None
String, path object (implementing os.PathLike[str]), or file-like
object implementing a write() function. If None, the result is
returned as a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {{'split', 'records', 'index', 'table'}}.
* DataFrame:
- default is 'columns'
- allowed values are: {{'split', 'records', 'index', 'columns',
'values', 'table'}}.
* The format of the JSON string:
- 'split' : dict like {{'index' -> [index], 'columns' -> [columns],
'data' -> [values]}}
- 'records' : list like [{{column -> value}}, ... , {{column -> value}}]
- 'index' : dict like {{index -> {{column -> value}}}}
- 'columns' : dict like {{column -> {{index -> value}}}}
- 'values' : just the values array
- 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}}
Describing the data, where data component is like ``orient='records'``.
date_format : {{None, 'epoch', 'iso'}}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line-delimited json format. Will
throw ValueError if incorrect 'orient' since others are not
list-like.
{compression_options}
.. versionchanged:: 1.4.0 Zstandard support.
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
indent : int, optional
Length of whitespace used to indent each record.
{storage_options}
.. versionadded:: 1.2.0
mode : str, default 'w' (writing)
Specify the IO mode for output when supplying a path_or_buf.
Accepted args are 'w' (writing) and 'a' (append) only.
mode='a' is only supported when lines is True and orient is 'records'.
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json : Convert a JSON string to pandas object.
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
``orient='table'`` contains a 'pandas_version' field under 'schema'.
This stores the version of `pandas` used in the latest revision of the
schema.
Examples
--------
>>> from json import loads, dumps
>>> df = pd.DataFrame(
... [["a", "b"], ["c", "d"]],
... index=["row 1", "row 2"],
... columns=["col 1", "col 2"],
... )
>>> result = df.to_json(orient="split")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4) # doctest: +SKIP
{{
"columns": [
"col 1",
"col 2"
],
"index": [
"row 1",
"row 2"
],
"data": [
[
"a",
"b"
],
[
"c",
"d"
]
]
}}
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> result = df.to_json(orient="records")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4) # doctest: +SKIP
[
{{
"col 1": "a",
"col 2": "b"
}},
{{
"col 1": "c",
"col 2": "d"
}}
]
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> result = df.to_json(orient="index")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4) # doctest: +SKIP
{{
"row 1": {{
"col 1": "a",
"col 2": "b"
}},
"row 2": {{
"col 1": "c",
"col 2": "d"
}}
}}
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> result = df.to_json(orient="columns")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4) # doctest: +SKIP
{{
"col 1": {{
"row 1": "a",
"row 2": "c"
}},
"col 2": {{
"row 1": "b",
"row 2": "d"
}}
}}
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> result = df.to_json(orient="values")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4) # doctest: +SKIP
[
[
"a",
"b"
],
[
"c",
"d"
]
]
Encoding with Table Schema:
>>> result = df.to_json(orient="table")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4) # doctest: +SKIP
{{
"schema": {{
"fields": [
{{
"name": "index",
"type": "string"
}},
{{
"name": "col 1",
"type": "string"
}},
{{
"name": "col 2",
"type": "string"
}}
],
"primaryKey": [
"index"
],
"pandas_version": "1.4.0"
}},
"data": [
{{
"index": "row 1",
"col 1": "a",
"col 2": "b"
}},
{{
"index": "row 2",
"col 1": "c",
"col 2": "d"
}}
]
}}
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
storage_options=storage_options,
mode=mode,
)
def to_hdf(
self,
path_or_buf: FilePath | HDFStore,
key: str,
mode: str = "a",
complevel: int | None = None,
complib: str | None = None,
append: bool_t = False,
format: str | None = None,
index: bool_t = True,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
dropna: bool_t | None = None,
data_columns: Literal[True] | list[str] | None = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.
.. warning::
One can store a subclass of ``DataFrame`` or ``Series`` to HDF5,
but the type of the subclass is lost upon storing.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
append : bool, default False
For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed".
index : bool, default True
Write DataFrame index as a column.
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
dropna : bool, default False, optional
Remove missing values.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See
:ref:`Query via data columns<io.hdf5-query-data-columns>`. for
more information.
Applicable only to format='table'.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
See Also
--------
read_hdf : Read from HDF file.
DataFrame.to_orc : Write a DataFrame to the binary orc format.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a SQL table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c']) # doctest: +SKIP
>>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP
>>> s.to_hdf('data.h5', key='s') # doctest: +SKIP
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's') # doctest: +SKIP
0 1
1 2
2 3
3 4
dtype: int64
"""
from pandas.io import pytables
# Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected
# "Union[DataFrame, Series]" [arg-type]
pytables.to_hdf(
path_or_buf,
key,
self, # type: ignore[arg-type]
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema: str | None = None,
if_exists: Literal["fail", "replace", "append"] = "fail",
index: bool_t = True,
index_label: IndexLabel = None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
method: str | None = None,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
connectable. See `here \
<https://docs.sqlalchemy.org/en/20/core/connections.html>`_.
If passing a sqlalchemy.engine.Connection which is already in a transaction,
the transaction will not be committed. If passing a sqlite3.Connection,
it will not be possible to roll back the record insertion.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
Returns
-------
None or int
Number of rows affected by to_sql. None is returned if the callable
passed into ``method`` does not return an integer number of rows.
The number of returned rows affected is the sum of the ``rowcount``
            attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable, which may not
            reflect the exact number of written rows as stipulated in the
            `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or
            `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__ documentation.
.. versionadded:: 1.4.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
3
>>> from sqlalchemy import text
>>> with engine.connect() as conn:
... conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
An `sqlalchemy.engine.Connection` can also be passed to `con`:
>>> with engine.begin() as connection:
... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
... df1.to_sql('users', con=connection, if_exists='append')
2
This is allowed to support operations that require that the same
DBAPI connection is used for the entire operation.
>>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
>>> df2.to_sql('users', con=engine, if_exists='append')
2
>>> with engine.connect() as conn:
... conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
(1, 'User 7')]
Overwrite the table with just ``df2``.
>>> df2.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
2
>>> with engine.connect() as conn:
... conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 6'), (1, 'User 7')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
3
>>> with engine.connect() as conn:
... conn.execute(text("SELECT * FROM integers")).fetchall()
[(1,), (None,), (2,)]
""" # noqa:E501
from pandas.io import sql
return sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "path",
)
def to_pickle(
self,
path: FilePath | WriteBuffer[bytes],
compression: CompressionOptions = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
storage_options: StorageOptions = None,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. File path where
the pickled object will be stored.
{compression_options}
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4, 5. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
{storage_options}
.. versionadded:: 1.2.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
""" # noqa: E501
from pandas.io.pickle import to_pickle
to_pickle(
self,
path,
compression=compression,
protocol=protocol,
storage_options=storage_options,
)
def to_clipboard(
self, excel: bool_t = True, sep: str | None = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- macOS : none
This method uses the processes developed for the package `pyperclip`. A
solution to render any output string format is given in the examples.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
Using the original `pyperclip` package for any string output format.
.. code-block:: python
import pyperclip
html = df.style.to_html()
pyperclip.copy(html)
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (date: 2, animal: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
    @overload
    def to_latex(
self,
buf: None = ...,
columns: Sequence[Hashable] | None = ...,
header: bool_t | Sequence[str] = ...,
index: bool_t = ...,
na_rep: str = ...,
formatters: FormattersType | None = ...,
float_format: FloatFormatType | None = ...,
sparsify: bool_t | None = ...,
index_names: bool_t = ...,
bold_rows: bool_t = ...,
column_format: str | None = ...,
longtable: bool_t | None = ...,
escape: bool_t | None = ...,
encoding: str | None = ...,
decimal: str = ...,
multicolumn: bool_t | None = ...,
multicolumn_format: str | None = ...,
multirow: bool_t | None = ...,
caption: str | tuple[str, str] | None = ...,
label: str | None = ...,
position: str | None = ...,
) -> str:
...
    @overload
    def to_latex(
self,
buf: FilePath | WriteBuffer[str],
columns: Sequence[Hashable] | None = ...,
header: bool_t | Sequence[str] = ...,
index: bool_t = ...,
na_rep: str = ...,
formatters: FormattersType | None = ...,
float_format: FloatFormatType | None = ...,
sparsify: bool_t | None = ...,
index_names: bool_t = ...,
bold_rows: bool_t = ...,
column_format: str | None = ...,
longtable: bool_t | None = ...,
escape: bool_t | None = ...,
encoding: str | None = ...,
decimal: str = ...,
multicolumn: bool_t | None = ...,
multicolumn_format: str | None = ...,
multirow: bool_t | None = ...,
caption: str | tuple[str, str] | None = ...,
label: str | None = ...,
position: str | None = ...,
) -> None:
...
def to_latex(
self,
buf: FilePath | WriteBuffer[str] | None = None,
columns: Sequence[Hashable] | None = None,
header: bool_t | Sequence[str] = True,
index: bool_t = True,
na_rep: str = "NaN",
formatters: FormattersType | None = None,
float_format: FloatFormatType | None = None,
sparsify: bool_t | None = None,
index_names: bool_t = True,
bold_rows: bool_t = False,
column_format: str | None = None,
longtable: bool_t | None = None,
escape: bool_t | None = None,
encoding: str | None = None,
decimal: str = ".",
multicolumn: bool_t | None = None,
multicolumn_format: str | None = None,
multirow: bool_t | None = None,
caption: str | tuple[str, str] | None = None,
label: str | None = None,
position: str | None = None,
) -> str | None:
r"""
Render object to a LaTeX tabular, longtable, or nested table.
Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{{table.tex}}``.
.. versionchanged:: 1.2.0
Added position argument, changed meaning of caption argument.
.. versionchanged:: 2.0.0
Refactored to use the Styler implementation via jinja2 templating.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {{str: function}}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
Use a longtable environment instead of tabular. Requires
adding a \usepackage{{longtable}} to your LaTeX preamble.
By default, the value will be read from the pandas config
module, and set to `True` if the option ``styler.latex.environment`` is
`"longtable"`.
.. versionchanged:: 2.0.0
The pandas option affecting this argument has changed.
escape : bool, optional
By default, the value will be read from the pandas config
module and set to `True` if the option ``styler.format.escape`` is
`"latex"`. When set to False prevents from escaping latex special
characters in column names.
.. versionchanged:: 2.0.0
The pandas option affecting this argument has changed, as has the
default value to `False`.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module, and is set
as the option ``styler.sparse.columns``.
.. versionchanged:: 2.0.0
The pandas option affecting this argument has changed.
multicolumn_format : str, default 'r'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module, and is set as the option
``styler.latex.multicol_align``.
.. versionchanged:: 2.0.0
The pandas option affecting this argument has changed, as has the
default value to "r".
multirow : bool, default True
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{{multirow}} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module, and is set as the option
``styler.sparse.index``.
.. versionchanged:: 2.0.0
The pandas option affecting this argument has changed, as has the
default value to `True`.
caption : str or tuple, optional
Tuple (full_caption, short_caption),
which results in ``\caption[short_caption]{{full_caption}}``;
if a single string is passed, no short caption will be set.
.. versionchanged:: 1.2.0
Optionally allow caption to be a tuple ``(full_caption, short_caption)``.
label : str, optional
The LaTeX label to be placed inside ``\label{{}}`` in the output.
This is used with ``\ref{{}}`` in the main ``.tex`` file.
position : str, optional
The LaTeX positional argument for tables, to be placed after
``\begin{{}}`` in the output.
.. versionadded:: 1.2.0
Returns
-------
str or None
If buf is None, returns the result as a string. Otherwise returns None.
See Also
--------
io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX
with conditional formatting.
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Notes
-----
As of v2.0.0 this method has changed to use the Styler implementation as
part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means
that ``jinja2`` is a requirement, and needs to be installed, for this method
to function. It is advised that users switch to using Styler, since that
implementation is more frequently updated and contains much more
flexibility with the output.
Examples
--------
Convert a general DataFrame to LaTeX with formatting:
>>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'],
... age=[26, 45],
... height=[181.23, 177.65]))
>>> print(df.to_latex(index=False,
... formatters={"name": str.upper},
... float_format="{:.1f}".format,
... )) # doctest: +SKIP
\begin{tabular}{lrr}
\toprule
name & age & height \\
\midrule
RAPHAEL & 26 & 181.2 \\
DONATELLO & 45 & 177.7 \\
\bottomrule
\end{tabular}
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("styler.latex.environment") == "longtable"
if escape is None:
escape = config.get_option("styler.format.escape") == "latex"
if multicolumn is None:
multicolumn = config.get_option("styler.sparse.columns")
if multicolumn_format is None:
multicolumn_format = config.get_option("styler.latex.multicol_align")
if multirow is None:
multirow = config.get_option("styler.sparse.index")
if column_format is not None and not isinstance(column_format, str):
raise ValueError("`column_format` must be str or unicode")
length = len(self.columns) if columns is None else len(columns)
if isinstance(header, (list, tuple)) and len(header) != length:
raise ValueError(f"Writing {length} cols but got {len(header)} aliases")
# Refactor formatters/float_format/decimal/na_rep/escape to Styler structure
base_format_ = {
"na_rep": na_rep,
"escape": "latex" if escape else None,
"decimal": decimal,
}
index_format_: dict[str, Any] = {"axis": 0, **base_format_}
column_format_: dict[str, Any] = {"axis": 1, **base_format_}
if isinstance(float_format, str):
float_format_: Callable | None = lambda x: float_format % x
else:
float_format_ = float_format
def _wrap(x, alt_format_):
if isinstance(x, (float, complex)) and float_format_ is not None:
return float_format_(x)
else:
return alt_format_(x)
formatters_: list | tuple | dict | Callable | None = None
if isinstance(formatters, list):
formatters_ = {
c: partial(_wrap, alt_format_=formatters[i])
for i, c in enumerate(self.columns)
}
elif isinstance(formatters, dict):
index_formatter = formatters.pop("__index__", None)
column_formatter = formatters.pop("__columns__", None)
if index_formatter is not None:
index_format_.update({"formatter": index_formatter})
if column_formatter is not None:
column_format_.update({"formatter": column_formatter})
formatters_ = formatters
float_columns = self.select_dtypes(include="float").columns
for col in float_columns:
if col not in formatters.keys():
formatters_.update({col: float_format_})
elif formatters is None and float_format is not None:
formatters_ = partial(_wrap, alt_format_=lambda v: v)
format_index_ = [index_format_, column_format_]
# Deal with hiding indexes and relabelling column names
hide_: list[dict] = []
relabel_index_: list[dict] = []
if columns:
hide_.append(
{
"subset": [c for c in self.columns if c not in columns],
"axis": "columns",
}
)
if header is False:
hide_.append({"axis": "columns"})
elif isinstance(header, (list, tuple)):
relabel_index_.append({"labels": header, "axis": "columns"})
format_index_ = [index_format_] # column_format is overwritten
if index is False:
hide_.append({"axis": "index"})
if index_names is False:
hide_.append({"names": True, "axis": "index"})
render_kwargs_ = {
"hrules": True,
"sparse_index": sparsify,
"sparse_columns": sparsify,
"environment": "longtable" if longtable else None,
"multicol_align": multicolumn_format
if multicolumn
else f"naive-{multicolumn_format}",
"multirow_align": "t" if multirow else "naive",
"encoding": encoding,
"caption": caption,
"label": label,
"position": position,
"column_format": column_format,
"clines": "skip-last;data"
if (multirow and isinstance(self.index, MultiIndex))
else None,
"bold_rows": bold_rows,
}
return self._to_latex_via_styler(
buf,
hide=hide_,
relabel_index=relabel_index_,
format={"formatter": formatters_, **base_format_},
format_index=format_index_,
render_kwargs=render_kwargs_,
)
def _to_latex_via_styler(
self,
buf=None,
*,
hide: dict | list[dict] | None = None,
relabel_index: dict | list[dict] | None = None,
format: dict | list[dict] | None = None,
format_index: dict | list[dict] | None = None,
render_kwargs: dict | None = None,
):
"""
Render object to a LaTeX tabular, longtable, or nested table.
Uses the ``Styler`` implementation with the following, ordered, method chaining:
.. code-block:: python
styler = Styler(DataFrame)
styler.hide(**hide)
styler.relabel_index(**relabel_index)
styler.format(**format)
styler.format_index(**format_index)
styler.to_latex(buf=buf, **render_kwargs)
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
hide : dict, list of dict
Keyword args to pass to the method call of ``Styler.hide``. If a list will
call the method numerous times.
relabel_index : dict, list of dict
Keyword args to pass to the method of ``Styler.relabel_index``. If a list
will call the method numerous times.
format : dict, list of dict
Keyword args to pass to the method call of ``Styler.format``. If a list will
call the method numerous times.
format_index : dict, list of dict
Keyword args to pass to the method call of ``Styler.format_index``. If a
list will call the method numerous times.
render_kwargs : dict
Keyword args to pass to the method call of ``Styler.to_latex``.
Returns
-------
str or None
If buf is None, returns the result as a string. Otherwise returns None.
"""
from pandas.io.formats.style import Styler
self = cast("DataFrame", self)
styler = Styler(self, uuid="")
for kw_name in ["hide", "relabel_index", "format", "format_index"]:
kw = vars()[kw_name]
if isinstance(kw, dict):
getattr(styler, kw_name)(**kw)
elif isinstance(kw, list):
for sub_kw in kw:
getattr(styler, kw_name)(**sub_kw)
# bold_rows is not a direct kwarg of Styler.to_latex
render_kwargs = {} if render_kwargs is None else render_kwargs
if render_kwargs.pop("bold_rows"):
styler.applymap_index(lambda v: "textbf:--rwrap;")
return styler.to_latex(buf=buf, **render_kwargs)
    @overload
    def to_csv(
self,
path_or_buf: None = ...,
sep: str = ...,
na_rep: str = ...,
float_format: str | Callable | None = ...,
columns: Sequence[Hashable] | None = ...,
header: bool_t | list[str] = ...,
index: bool_t = ...,
index_label: IndexLabel | None = ...,
mode: str = ...,
encoding: str | None = ...,
compression: CompressionOptions = ...,
quoting: int | None = ...,
quotechar: str = ...,
lineterminator: str | None = ...,
chunksize: int | None = ...,
date_format: str | None = ...,
doublequote: bool_t = ...,
escapechar: str | None = ...,
decimal: str = ...,
errors: str = ...,
storage_options: StorageOptions = ...,
) -> str:
...
    @overload
    def to_csv(
self,
path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str],
sep: str = ...,
na_rep: str = ...,
float_format: str | Callable | None = ...,
columns: Sequence[Hashable] | None = ...,
header: bool_t | list[str] = ...,
index: bool_t = ...,
index_label: IndexLabel | None = ...,
mode: str = ...,
encoding: str | None = ...,
compression: CompressionOptions = ...,
quoting: int | None = ...,
quotechar: str = ...,
lineterminator: str | None = ...,
chunksize: int | None = ...,
date_format: str | None = ...,
doublequote: bool_t = ...,
escapechar: str | None = ...,
decimal: str = ...,
errors: str = ...,
storage_options: StorageOptions = ...,
) -> None:
...
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "path_or_buf",
)
def to_csv(
self,
path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
sep: str = ",",
na_rep: str = "",
float_format: str | Callable | None = None,
columns: Sequence[Hashable] | None = None,
header: bool_t | list[str] = True,
index: bool_t = True,
index_label: IndexLabel | None = None,
mode: str = "w",
encoding: str | None = None,
compression: CompressionOptions = "infer",
quoting: int | None = None,
quotechar: str = '"',
lineterminator: str | None = None,
chunksize: int | None = None,
date_format: str | None = None,
doublequote: bool_t = True,
escapechar: str | None = None,
decimal: str = ".",
errors: str = "strict",
storage_options: StorageOptions = None,
) -> str | None:
r"""
Write object to a comma-separated values (csv) file.
Parameters
----------
path_or_buf : str, path object, file-like object, or None, default None
String, path object (implementing os.PathLike[str]), or file-like
object implementing a write() function. If None, the result is
returned as a string. If a non-binary file object is passed, it should
be opened with `newline=''`, disabling universal newlines. If a binary
file object is passed, `mode` might need to contain a `'b'`.
.. versionchanged:: 1.2.0
Support for binary file objects was introduced.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, Callable, default None
Format string for floating point numbers. If a Callable is given, it takes
precedence over other numeric formatting parameters, like decimal.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str, default 'w'
Python write mode. The available write modes are the same as
:py:func:`open`.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'. `encoding` is not supported if `path_or_buf`
is a non-binary file object.
{compression_options}
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
.. versionchanged:: 1.1.0
Passing compression options as keys in dict is
supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'.
.. versionchanged:: 1.2.0
Compression is supported for binary file objects.
.. versionchanged:: 1.2.0
Previous versions forwarded dict entries for 'gzip' to
`gzip.open` instead of `gzip.GzipFile` which prevented
setting `mtime`.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
lineterminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
            this method is called (e.g. '\\n' for Linux, '\\r\\n' for Windows).
.. versionchanged:: 1.5.0
Previously was line_terminator, changed for consistency with
read_csv and the standard library 'csv' module.
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
.. versionadded:: 1.1.0
{storage_options}
.. versionadded:: 1.2.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']}})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
To write a csv file to a new folder or nested folder you will first
need to create it using either Pathlib or os:
>>> from pathlib import Path # doctest: +SKIP
>>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP
>>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP
>>> df.to_csv(filepath) # doctest: +SKIP
>>> import os # doctest: +SKIP
>>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP
>>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
formatter = DataFrameFormatter(
frame=df,
header=header,
index=index,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
)
return DataFrameRenderer(formatter).to_csv(
path_or_buf,
lineterminator=lineterminator,
sep=sep,
encoding=encoding,
errors=errors,
compression=compression,
quoting=quoting,
columns=columns,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
storage_options=storage_options,
)
# ----------------------------------------------------------------------
# Lookup Caching
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
raise AbstractMethodError(self)
def _maybe_update_cacher(
self,
clear: bool_t = False,
verify_is_copy: bool_t = True,
inplace: bool_t = False,
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
if using_copy_on_write():
return
if verify_is_copy:
self._check_setitem_copy(t="referent")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
raise AbstractMethodError(self)
# ----------------------------------------------------------------------
# Indexing Methods
def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
For `Series` this parameter is unused and defaults to 0.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
nv.validate_take((), kwargs)
return self._take(indices, axis)
def _take(
self: NDFrameT,
indices,
axis: Axis = 0,
convert_indices: bool_t = True,
) -> NDFrameT:
"""
Internal version of the `take` method allowing specification of additional args.
See the docstring of `take` for full explanation of the parameters.
"""
if not isinstance(indices, slice):
indices = np.asarray(indices, dtype=np.intp)
if (
axis == 0
and indices.ndim == 1
and using_copy_on_write()
and is_range_indexer(indices, len(self))
):
return self.copy(deep=None)
new_data = self._mgr.take(
indices,
axis=self._get_block_manager_axis(axis),
verify=True,
convert_indices=convert_indices,
)
return self._constructor(new_data).__finalize__(self, method="take")
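# Hedged sketch (hypothetical frame, not from the original source): under
# copy-on-write, a contiguous 0..n-1 indexer hits the fast path above and
# simply returns a (lazy) copy, e.g.
#   >>> df = pd.DataFrame({"a": [1, 2, 3]})
#   >>> df.take(np.arange(len(df))).equals(df)   # range indexer, no reorder
#   True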
def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (used in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self._take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
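# Hedged illustration (hypothetical frame): positional selection through
# ``_take_with_is_copy`` keeps a weakref to the parent, so a later chained
# assignment can warn under the default (non copy-on-write) mode, e.g.
#   >>> df = pd.DataFrame({"a": [1, 2, 3]})
#   >>> sub = df.iloc[[0, 2]]      # routed through _take_with_is_copy
#   >>> sub["a"] = 0               # may emit SettingWithCopyWarning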
def xs(
self: NDFrameT,
key: IndexLabel,
axis: Axis = 0,
level: IndexLabel = None,
drop_level: bool_t = True,
) -> NDFrameT:
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred to by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` cannot be used to set values.
MultiIndex slicers are a generic way to get/set values on
any level or levels.
They are a superset of `xs` functionality; see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog', 'walks'))
num_legs 4
num_wings 0
Name: (mammal, dog, walks), dtype: int64
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if isinstance(key, list):
raise TypeError("list keys are not supported in xs, pass a tuple instead")
if level is not None:
if not isinstance(labels, MultiIndex):
raise TypeError("Index must be a MultiIndex")
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
if drop_level:
return self[key]
index = self.columns
else:
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = index._get_loc_level(key, level=0)
if not drop_level:
if lib.is_integer(loc):
new_index = index[loc : loc + 1]
else:
new_index = index[loc]
else:
loc = index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = index[loc]
if is_scalar(loc) and axis == 0:
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
# that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_mgr = self._mgr.fast_xs(loc)
result = self._constructor_sliced(
new_mgr, name=self.index[loc]
).__finalize__(self)
elif is_scalar(loc):
result = self.iloc[:, slice(loc, loc + 1)]
elif axis == 1:
result = self.iloc[:, loc]
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
def __getitem__(self, item):
raise AbstractMethodError(self)
def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._mgr.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
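# Hedged sketch (hypothetical Series): ``_slice`` is strictly positional,
# so the labels play no role in what is selected, e.g.
#   >>> s = pd.Series([10, 20, 30], index=[5, 6, 7])
#   >>> s.iloc[0:2].tolist()       # positions 0 and 1, not labels 0..2
#   [10, 20]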
def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
Will return a boolean if we are a view and are cached, but
single-dtyped, meaning that the cacher should be updated following
the setting.
"""
if self._is_copy:
self._check_setitem_copy(t="referent")
return False
def _check_setitem_copy(self, t: str = "setting", force: bool_t = False):
"""
Parameters
----------
t : str, the type of setting error
force : bool, default False
If True, then force showing an error.
Validate if we are doing a setitem on a chained copy.
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
if using_copy_on_write():
return
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or (r is not None and r.shape == self.shape):
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referent":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise SettingWithCopyError(t)
if value == "warn":
warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level())
def __delitem__(self, key) -> None:
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
# By using engine's __contains__ we effectively
# restrict to same-length tuples
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
loc = self.axes[-1].get_loc(key)
self._mgr = self._mgr.idelete(loc)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
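# Hedged example of the MultiIndex shorthand above (hypothetical frame,
# not a doctest from the source): deleting a partial key removes every
# column whose leading levels match it, e.g.
#   >>> cols = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "x")])
#   >>> df = pd.DataFrame(1, index=[0], columns=cols)
#   >>> del df["a"]                # drops ("a", "x") and ("a", "y")
#   >>> list(df.columns)
#   [('b', 'x')]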
# ----------------------------------------------------------------------
# Unsorted
def _check_inplace_and_allows_duplicate_labels(self, inplace):
if inplace and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'inplace=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
same type as items contained in object
Examples
--------
>>> df = pd.DataFrame(
... [
... [24.3, 75.7, "high"],
... [31, 87.8, "high"],
... [22, 71.6, "medium"],
... [35, 95, "medium"],
... ],
... columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
... )
>>> df
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df.get(["temp_celsius", "windspeed"])
temp_celsius windspeed
2014-02-12 24.3 high
2014-02-13 31.0 high
2014-02-14 22.0 medium
2014-02-15 35.0 medium
>>> ser = df['windspeed']
>>> ser.get('2014-02-13')
'high'
If the key isn't found, the default value will be used.
>>> df.get(["temp_celsius", "temp_kelvin"], default="default_value")
'default_value'
>>> ser.get('2014-02-10', '[unknown]')
'[unknown]'
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array"""
return self._mgr.is_view
def reindex_like(
self: NDFrameT,
other,
method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None,
copy: bool_t | None = None,
limit=None,
tolerance=None,
) -> NDFrameT:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
@overload
def drop(
self,
labels: IndexLabel = ...,
*,
axis: Axis = ...,
index: IndexLabel = ...,
columns: IndexLabel = ...,
level: Level | None = ...,
inplace: Literal[True],
errors: IgnoreRaise = ...,
) -> None:
...
@overload
def drop(
self: NDFrameT,
labels: IndexLabel = ...,
*,
axis: Axis = ...,
index: IndexLabel = ...,
columns: IndexLabel = ...,
level: Level | None = ...,
inplace: Literal[False] = ...,
errors: IgnoreRaise = ...,
) -> NDFrameT:
...
@overload
def drop(
self: NDFrameT,
labels: IndexLabel = ...,
*,
axis: Axis = ...,
index: IndexLabel = ...,
columns: IndexLabel = ...,
level: Level | None = ...,
inplace: bool_t = ...,
errors: IgnoreRaise = ...,
) -> NDFrameT | None:
...
def drop(
self: NDFrameT,
labels: IndexLabel = None,
*,
axis: Axis = 0,
index: IndexLabel = None,
columns: IndexLabel = None,
level: Level | None = None,
inplace: bool_t = False,
errors: IgnoreRaise = "raise",
) -> NDFrameT | None:
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes = {"index": index}
if self.ndim == 2:
axes["columns"] = columns
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
return None
else:
return obj
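# Hedged usage sketch (hypothetical frame): the two calling conventions
# resolved above are equivalent, e.g.
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> df.drop(columns=["b"]).equals(df.drop(["b"], axis=1))
#   True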
def _drop_axis(
self: NDFrameT,
labels,
axis,
level=None,
errors: IgnoreRaise = "raise",
only_slice: bool_t = False,
) -> NDFrameT:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
only_slice : bool, default False
Whether indexing along columns should be view-only.
"""
axis_num = self._get_axis_number(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
indexer = axis.get_indexer(new_axis)
# Case for non-unique axis
else:
is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple)
labels = ensure_object(common.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
mask = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and mask.all():
raise KeyError(f"{labels} not found in axis")
elif (
isinstance(axis, MultiIndex)
and labels.dtype == "object"
and not is_tuple_labels
):
# Set level to zero in case of MultiIndex and label is string,
# because isin can't handle strings for MultiIndexes GH#36293
# In case of tuples we get dtype object but have to use isin GH#42771
mask = ~axis.get_level_values(0).isin(labels)
else:
mask = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
if is_extension_array_dtype(mask.dtype):
# GH#45860
mask = mask.to_numpy(dtype=bool)
indexer = mask.nonzero()[0]
new_axis = axis.take(indexer)
bm_axis = self.ndim - axis_num - 1
new_mgr = self._mgr.reindex_indexer(
new_axis,
indexer,
axis=bm_axis,
allow_dups=True,
copy=None,
only_slice=only_slice,
)
result = self._constructor(new_mgr)
if self.ndim == 1:
result.name = self.name
return result.__finalize__(self)
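# Hedged illustration of the non-unique branch above (hypothetical Series):
# every occurrence of a dropped label is removed via the boolean-mask path,
# e.g.
#   >>> s = pd.Series([1, 2, 3], index=["a", "a", "b"])
#   >>> s.drop("a").index.tolist()
#   ['b']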
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
result : same type as self
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._mgr = result._mgr
self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True)
def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
Axis to add prefix on
.. versionadded:: 2.0.0
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = lambda x: f"{prefix}{x}"
axis_name = self._info_axis_name
if axis is not None:
axis_name = self._get_axis_name(axis)
mapper = {axis_name: f}
# error: Incompatible return value type (got "Optional[NDFrameT]",
# expected "NDFrameT")
# error: Argument 1 to "rename" of "NDFrame" has incompatible type
# "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
# error: Keywords must be strings
return self._rename(**mapper) # type: ignore[return-value, arg-type, misc]
def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
Axis to add suffix on
.. versionadded:: 2.0.0
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = lambda x: f"{x}{suffix}"
axis_name = self._info_axis_name
if axis is not None:
axis_name = self._get_axis_name(axis)
mapper = {axis_name: f}
# error: Incompatible return value type (got "Optional[NDFrameT]",
# expected "NDFrameT")
# error: Argument 1 to "rename" of "NDFrame" has incompatible type
# "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
# error: Keywords must be strings
return self._rename(**mapper) # type: ignore[return-value, arg-type, misc]
@overload
def sort_values(
self: NDFrameT,
*,
axis: Axis = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: Literal[False] = ...,
kind: str = ...,
na_position: str = ...,
ignore_index: bool_t = ...,
key: ValueKeyFunc = ...,
) -> NDFrameT:
...
@overload
def sort_values(
self,
*,
axis: Axis = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: Literal[True],
kind: str = ...,
na_position: str = ...,
ignore_index: bool_t = ...,
key: ValueKeyFunc = ...,
) -> None:
...
@overload
def sort_values(
self: NDFrameT,
*,
axis: Axis = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: bool_t = ...,
kind: str = ...,
na_position: str = ...,
ignore_index: bool_t = ...,
key: ValueKeyFunc = ...,
) -> NDFrameT | None:
...
def sort_values(
self: NDFrameT,
*,
axis: Axis = 0,
ascending: bool_t | Sequence[bool_t] = True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
key: ValueKeyFunc = None,
) -> NDFrameT | None:
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, it must match the length of
`by`.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` and `stable` are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
key : callable, optional
Apply the key function to the values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect a
``Series`` and return a Series with the same shape as the input.
It will be applied to each column in `by` independently.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
DataFrame with sorted values or None if ``inplace=True``.
See Also
--------
DataFrame.sort_index : Sort a DataFrame by the index.
Series.sort_values : Similar method for a Series.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... 'col4': ['a', 'B', 'c', 'D', 'e', 'F']
... })
>>> df
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
2 B 9 9 c
3 NaN 8 4 D
4 D 7 2 e
5 C 4 3 F
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
2 B 9 9 c
5 C 4 3 F
4 D 7 2 e
3 NaN 8 4 D
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3 col4
1 A 1 1 B
0 A 2 0 a
2 B 9 9 c
5 C 4 3 F
4 D 7 2 e
3 NaN 8 4 D
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3 col4
4 D 7 2 e
5 C 4 3 F
2 B 9 9 c
0 A 2 0 a
1 A 1 1 B
3 NaN 8 4 D
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3 col4
3 NaN 8 4 D
4 D 7 2 e
5 C 4 3 F
2 B 9 9 c
0 A 2 0 a
1 A 1 1 B
Sorting with a key function
>>> df.sort_values(by='col4', key=lambda col: col.str.lower())
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
2 B 9 9 c
3 NaN 8 4 D
4 D 7 2 e
5 C 4 3 F
Natural sort with the key argument,
using the `natsort <https://github.com/SethMMorton/natsort>`__ package.
>>> df = pd.DataFrame({
... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'],
... "value": [10, 20, 30, 40, 50]
... })
>>> df
time value
0 0hr 10
1 128hr 20
2 72hr 30
3 48hr 40
4 96hr 50
>>> from natsort import index_natsorted
>>> df.sort_values(
... by="time",
... key=lambda x: np.argsort(index_natsorted(df["time"]))
... )
time value
0 0hr 10
3 48hr 40
2 72hr 30
4 96hr 50
1 128hr 20
"""
raise AbstractMethodError(self)
@overload
def sort_index(
self,
*,
axis: Axis = ...,
level: IndexLabel = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: Literal[True],
kind: SortKind = ...,
na_position: NaPosition = ...,
sort_remaining: bool_t = ...,
ignore_index: bool_t = ...,
key: IndexKeyFunc = ...,
) -> None:
...
@overload
def sort_index(
self: NDFrameT,
*,
axis: Axis = ...,
level: IndexLabel = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: Literal[False] = ...,
kind: SortKind = ...,
na_position: NaPosition = ...,
sort_remaining: bool_t = ...,
ignore_index: bool_t = ...,
key: IndexKeyFunc = ...,
) -> NDFrameT:
...
@overload
def sort_index(
self: NDFrameT,
*,
axis: Axis = ...,
level: IndexLabel = ...,
ascending: bool_t | Sequence[bool_t] = ...,
inplace: bool_t = ...,
kind: SortKind = ...,
na_position: NaPosition = ...,
sort_remaining: bool_t = ...,
ignore_index: bool_t = ...,
key: IndexKeyFunc = ...,
) -> NDFrameT | None:
...
def sort_index(
self: NDFrameT,
*,
axis: Axis = 0,
level: IndexLabel = None,
ascending: bool_t | Sequence[bool_t] = True,
inplace: bool_t = False,
kind: SortKind = "quicksort",
na_position: NaPosition = "last",
sort_remaining: bool_t = True,
ignore_index: bool_t = False,
key: IndexKeyFunc = None,
) -> NDFrameT | None:
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
ascending = validate_ascending(ascending)
target = self._get_axis(axis)
indexer = get_indexer_indexer(
target, level, ascending, kind, na_position, sort_remaining, key
)
if indexer is None:
if inplace:
result = self
else:
result = self.copy(deep=None)
if ignore_index:
result.index = default_index(len(self))
if inplace:
return None
else:
return result
baxis = self._get_block_manager_axis(axis)
new_data = self._mgr.take(indexer, axis=baxis, verify=False)
# reconstruct axis if needed
new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic())
if ignore_index:
axis = 1 if isinstance(self, ABCDataFrame) else 0
new_data.set_axis(axis, default_index(len(indexer)))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_index")
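# Hedged sketch of the early return above (hypothetical frame): when the
# requested order already holds, ``get_indexer_indexer`` returns None and
# ``sort_index`` hands back a copy without re-taking any rows, e.g.
#   >>> df = pd.DataFrame({"a": [1, 2]}, index=[0, 1])
#   >>> df.sort_index().equals(df)     # already monotonic
#   True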
@doc(
klass=_shared_doc_kwargs["klass"],
optional_reindex="",
)
def reindex(
self: NDFrameT,
labels=None,
index=None,
columns=None,
axis: Axis | None = None,
method: str | None = None,
copy: bool_t | None = None,
level: Level | None = None,
fill_value: Scalar | None = np.nan,
limit: int | None = None,
tolerance=None,
) -> NDFrameT:
"""
Conform {klass} to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
{optional_reindex}
method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
{klass} with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={{'index', 'columns'}}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to propagate the next valid observation backwards to fill the
``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
if index is not None and columns is not None and labels is not None:
raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.")
elif index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
if labels is not None:
if index is not None:
columns = labels
else:
index = labels
else:
if axis and self._get_axis_number(axis) == 1:
columns = labels
else:
index = labels
axes: dict[Literal["index", "columns"], Any] = {
"index": index,
"columns": columns,
}
method = clean_reindex_fill_method(method)
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if copy and using_copy_on_write():
copy = False
if all(
self._get_axis(axis_name).identical(ax)
for axis_name, ax in axes.items()
if ax is not None
):
return self.copy(deep=copy)
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self, method="reindex")
def _reindex_axes(
self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy
) -> NDFrameT:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
# If we've made a copy once, no need to make another one
copy = False
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(common.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
and not (
self.ndim == 2
and len(self.dtypes) == 1
and is_extension_array_dtype(self.dtypes.iloc[0])
)
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: NDFrameT,
reindexers,
fill_value=None,
copy: bool_t | None = False,
allow_dups: bool_t = False,
) -> NDFrameT:
"""allow_dups indicates an internal call here"""
# reindex doing multiple operations on different axes if indicated
new_data = self._mgr
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_platform_int(indexer)
# TODO: speed up on homogeneous DataFrame objects (see _reindex_multi)
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
# If we've made a copy once, no need to make another one
copy = False
if (
(copy or copy is None)
and new_data is self._mgr
and not using_copy_on_write()
):
new_data = new_data.copy(deep=copy)
elif using_copy_on_write() and new_data is self._mgr:
new_data = new_data.copy(deep=False)
return self._constructor(new_data).__finalize__(self)
def filter(
self: NDFrameT,
items=None,
like: str | None = None,
regex: str | None = None,
axis: Axis | None = None,
) -> NDFrameT:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis, 'columns' for
DataFrame. For `Series` this parameter is unused and defaults to `None`.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = common.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
# error: Keywords must be strings
return self.reindex( # type: ignore[misc]
**{name: [r for r in items if r in labels]} # type: ignore[arg-type]
)
elif like:
def f(x) -> bool_t:
assert like is not None # needed for mypy
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x) -> bool_t:
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: NDFrameT, n: int = 5) -> NDFrameT:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `|n|` rows, equivalent to ``df[:n]``.
If n is larger than the number of rows, this function returns all rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
def tail(self: NDFrameT, n: int = 5) -> NDFrameT:
"""
Return the last `n` rows.
This function returns the last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `|n|` rows, equivalent to ``df[|n|:]``.
If n is larger than the number of rows, this function returns all rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self: NDFrameT,
n: int | None = None,
frac: float | None = None,
replace: bool_t = False,
weights=None,
random_state: RandomState | None = None,
axis: Axis | None = None,
ignore_index: bool_t = False,
) -> NDFrameT:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be the same length as the
axis being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional
If int, array-like, or BitGenerator, seed for random number generator.
If np.random.RandomState or np.random.Generator, use as given.
.. versionchanged:: 1.1.0
array-like and BitGenerator object now passed to np.random.RandomState()
as seed
.. versionchanged:: 1.4.0
np.random.Generator objects now accepted
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is the stat axis
for the given data type. For `Series` this parameter is unused and defaults to `None`.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.3.0
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
DataFrameGroupBy.sample: Generates random samples from each group of a
DataFrame object.
SeriesGroupBy.sample: Generates random samples from each group of a
Series object.
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
If `frac` > 1, `replace` should be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsampled sample of the ``DataFrame`` with replacement:
Note that the `replace` parameter has to be `True` when `frac` > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
""" # noqa:E501
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
obj_len = self.shape[axis]
# Process random_state argument
rs = common.random_state(random_state)
size = sample.process_sampling_size(n, frac, replace)
if size is None:
assert frac is not None
size = round(frac * obj_len)
if weights is not None:
weights = sample.preprocess_weights(self, weights, axis)
sampled_indices = sample.sample(obj_len, size, replace, weights, rs)
result = self.take(sampled_indices, axis=axis)
if ignore_index:
result.index = default_index(len(result))
return result
def pipe(
self,
func: Callable[..., T] | tuple[Callable[..., T], str],
*args,
**kwargs,
) -> T:
r"""
Apply chainable functions that expect Series or DataFrames.
Parameters
----------
func : function
Function to apply to the {klass}.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the {klass}.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(func, arg2=b, arg3=c)
... ) # doctest: +SKIP
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``func`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((func, 'arg2'), arg1=a, arg3=c)
... ) # doctest: +SKIP
"""
if using_copy_on_write():
return common.pipe(self.copy(deep=None), func, *args, **kwargs)
return common.pipe(self, func, *args, **kwargs)
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: NDFrameT, other, method: str | None = None, **kwargs
) -> NDFrameT:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : str, optional
A passed method name providing context on where ``__finalize__``
was called.
.. warning::
The value passed as `method` is not currently considered
stable across pandas releases.
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels
# For subclasses using _metadata.
for name in set(self._metadata) & set(other._metadata):
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
if method == "concat":
attrs = other.objs[0].attrs
check_attrs = all(objs.attrs == attrs for objs in other.objs[1:])
if check_attrs:
for name in attrs:
self.attrs[name] = attrs[name]
allows_duplicate_labels = all(
x.flags.allows_duplicate_labels for x in other.objs
)
self.flags.allows_duplicate_labels = allows_duplicate_labels
return self
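# Hedged illustration (hypothetical frame): ``attrs`` and the
# ``allows_duplicate_labels`` flag travel through ``__finalize__``, e.g.
#   >>> df = pd.DataFrame({"a": [1, 2]})
#   >>> df.attrs["source"] = "sensor-1"    # arbitrary example metadata
#   >>> df.head(1).attrs                   # propagated via __finalize__
#   {'source': 'sensor-1'}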
def __getattr__(self, name: str):
"""
After regular attribute access, try looking up the name.
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name not in self._internal_names_set
and name not in self._metadata
and name not in self._accessors
and self._info_axis._can_hold_identifiers_and_holds_name(name)
):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
"""
After regular attribute access, try setting the name.
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=find_stack_level(),
)
object.__setattr__(self, name, value)
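# Hedged example of the attribute fallbacks above (hypothetical frame):
# existing columns can be read as attributes, but assigning to a brand-new
# attribute name does not create a column (hence the warning above), e.g.
#   >>> df = pd.DataFrame({"speed": [1, 2]})
#   >>> df.speed.tolist()          # __getattr__ falls back to df["speed"]
#   [1, 2]
#   >>> df.accel = [3, 4]          # plain attribute; no "accel" column
#   >>> "accel" in df.columns
#   False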
def _dir_additions(self) -> set[str]:
"""
Add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first level values are used.
"""
additions = super()._dir_additions()
if self._info_axis._can_hold_strings:
additions.update(self._info_axis._dir_additions_for_owner)
return additions
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""
Consolidate _mgr -- if the blocks have changed, then clear the
cache
"""
if isinstance(self._mgr, (ArrayManager, SingleArrayManager)):
return f()
blocks_before = len(self._mgr.blocks)
result = f()
if len(self._mgr.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f() -> None:
self._mgr = self._mgr.consolidate()
self._protect_consolidate(f)
def _consolidate(self):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Returns
-------
consolidated : same type as caller
"""
f = lambda: self._mgr.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self) -> bool_t:
if self._mgr.is_single_block:
return False
if self._mgr.any_extension_types:
# Even if they have the same dtype, we can't consolidate them,
# so we pretend this is "mixed"
return True
return self.dtypes.nunique() > 1
def _check_inplace_setting(self, value) -> bool_t:
"""check whether we allow in-place setting with this type of value"""
if self._is_mixed_type and not self._mgr.is_numeric_mixed_type:
# allow an actual np.nan through
if is_float(value) and np.isnan(value) or value is lib.no_default:
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self: NDFrameT) -> NDFrameT:
return self._constructor(self._mgr.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._mgr.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self):
raise AbstractMethodError(self)
@property
def _values(self) -> ArrayLike:
"""internal implementation"""
raise AbstractMethodError(self)
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
data = self._mgr.get_dtypes()
return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)
def astype(
self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise"
) -> NDFrameT:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : str, data type, Series or Mapping of column name -> data type
Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to
cast entire pandas object to the same type. Alternatively, use a
mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is
a numpy.dtype or Python type to cast one or more of the DataFrame's
columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Notes
-----
.. versionchanged:: 2.0.0
Using ``astype`` to convert from timezone-naive dtype to
timezone-aware dtype will raise an exception.
Use :meth:`Series.dt.tz_localize` instead.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int32): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> from pandas.api.types import CategoricalDtype
>>> cat_dtype = CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Create a series of dates:
>>> ser_date = pd.Series(pd.date_range('20200101', periods=3))
>>> ser_date
0 2020-01-01
1 2020-01-02
2 2020-01-03
dtype: datetime64[ns]
"""
if copy and using_copy_on_write():
copy = False
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
            # GH#44417 cast to Series so we can use .iat below, which will be
            #  robust even when the column labels are not unique (lookup is positional)
from pandas import Series
dtype_ser = Series(dtype, dtype=object)
for col_name in dtype_ser.index:
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument. "
f"'{col_name}' not found in columns."
)
dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False)
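            # columns that do not appear in the mapping come back as NaN from the
            #  reindex above; the loop below leaves those columns unchanged (copied)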
results = []
for i, (col_name, col) in enumerate(self.items()):
cdt = dtype_ser.iat[i]
if isna(cdt):
res_col = col.copy(deep=copy)
else:
try:
res_col = col.astype(dtype=cdt, copy=copy, errors=errors)
except ValueError as ex:
ex.args = (
f"{ex}: Error while type casting for column '{col_name}'",
)
raise
results.append(res_col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
# TODO(EA2D): special case not needed with 2D EAs
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self, method="astype")
# GH 33113: handle empty frame or series
if not results:
return self.copy(deep=None)
# GH 19920: retain column metadata after concat
result = concat(results, axis=1, copy=False)
# GH#40810 retain subclass
# error: Incompatible types in assignment
# (expression has type "NDFrameT", variable has type "DataFrame")
result = self._constructor(result) # type: ignore[assignment]
result.columns = self.columns
result = result.__finalize__(self, method="astype")
# https://github.com/python/mypy/issues/8354
return cast(NDFrameT, result)
def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Since pandas is not thread safe, see the
:ref:`gotchas <gotchas.thread-safety>` when copying in a threading
environment.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
        Updates to the data shared by shallow copy and original are reflected
        in both; the deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._mgr.copy(deep=deep)
self._clear_item_cache()
return self._constructor(data).__finalize__(self, method="copy")
def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT:
return self.copy(deep=deep)
def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
Parameters
----------
copy : bool, default True
Whether to make a copy for non-object or non-inferrable columns
or Series.
Returns
-------
same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
new_mgr = self._mgr.convert(copy=copy)
return self._constructor(new_mgr).__finalize__(self, method="infer_objects")
def convert_dtypes(
self: NDFrameT,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
convert_floating: bool_t = True,
dtype_backend: DtypeBackend = "numpy_nullable",
) -> NDFrameT:
"""
Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``.
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
        convert_boolean : bool, default True
            Whether object dtypes should be converted to ``BooleanDtypes()``.
        convert_floating : bool, default True
            Whether, if possible, conversion can be done to floating extension types.
            If `convert_integer` is also True, preference will be given to integer
            dtypes if the floats can be faithfully cast to integers.
.. versionadded:: 1.2.0
        dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable"
            Which dtype_backend to use: with ``"numpy_nullable"``, the returned
            object uses nullable dtypes for all dtypes that have a nullable
            implementation; with ``"pyarrow"``, pyarrow-backed dtypes are used for
            all dtypes. The dtype_backends are still experimental.
.. versionadded:: 2.0
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, ``convert_boolean`` and
``convert_floating``, it is possible to turn off individual conversions
to ``StringDtype``, the integer extension types, ``BooleanDtype``
or floating extension types, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer
or floating extension type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type. Otherwise, convert to an
appropriate floating extension type.
.. versionchanged:: 1.2
Starting with pandas 1.2, this method also converts float columns
to the nullable floating extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
.. versionadded:: 2.0
The nullable dtype implementation can be configured by calling
``pd.set_option("mode.dtype_backend", "pandas")`` to use
numpy-backed nullable dtypes or
``pd.set_option("mode.dtype_backend", "pyarrow")`` to use
pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``).
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 <NA>
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string[python]
c boolean
d string[python]
e Int64
f Float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
check_dtype_backend(dtype_backend)
if self.ndim == 1:
return self._convert_dtypes(
infer_objects,
convert_string,
convert_integer,
convert_boolean,
convert_floating,
dtype_backend=dtype_backend,
)
else:
results = [
col._convert_dtypes(
infer_objects,
convert_string,
convert_integer,
convert_boolean,
convert_floating,
dtype_backend=dtype_backend,
)
for col_name, col in self.items()
]
if len(results) > 0:
result = concat(results, axis=1, copy=False, keys=self.columns)
cons = cast(Type["DataFrame"], self._constructor)
result = cons(result)
result = result.__finalize__(self, method="convert_dtypes")
# https://github.com/python/mypy/issues/8354
return cast(NDFrameT, result)
else:
return self.copy(deep=None)
# ----------------------------------------------------------------------
# Filling NA's
    @overload
    def fillna(
self: NDFrameT,
value: Hashable | Mapping | Series | DataFrame = ...,
*,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: Literal[False] = ...,
limit: int | None = ...,
downcast: dict | None = ...,
) -> NDFrameT:
...
    @overload
    def fillna(
self,
value: Hashable | Mapping | Series | DataFrame = ...,
*,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: Literal[True],
limit: int | None = ...,
downcast: dict | None = ...,
) -> None:
...
    @overload
    def fillna(
self: NDFrameT,
value: Hashable | Mapping | Series | DataFrame = ...,
*,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: bool_t = ...,
limit: int | None = ...,
downcast: dict | None = ...,
) -> NDFrameT | None:
...
def fillna(
self: NDFrameT,
value: Hashable | Mapping | Series | DataFrame = None,
*,
method: FillnaOptions | None = None,
axis: Axis | None = None,
inplace: bool_t = False,
limit: int | None = None,
downcast: dict | None = None,
) -> NDFrameT | None:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {{'backfill', 'bfill', 'ffill', None}}, default None
Method to use for filling holes in reindexed Series:
* ffill: propagate last valid observation forward to next valid.
* backfill / bfill: use next valid observation to fill gap.
axis : {axes_single_arg}
Axis along which to fill missing values. For `Series`
this parameter is unused and defaults to 0.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, np.nan],
... [np.nan, 3, np.nan, 4]],
... columns=list("ABCD"))
>>> df
A B C D
0 NaN 2.0 NaN 0.0
1 3.0 4.0 NaN 1.0
2 NaN NaN NaN NaN
3 NaN 3.0 NaN 4.0
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0.0
1 3.0 4.0 0.0 1.0
2 0.0 0.0 0.0 0.0
3 0.0 3.0 0.0 4.0
We can also propagate non-null values forward or backward.
>>> df.fillna(method="ffill")
A B C D
0 NaN 2.0 NaN 0.0
1 3.0 4.0 NaN 1.0
2 3.0 4.0 NaN 1.0
3 3.0 3.0 NaN 4.0
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0.0
1 3.0 4.0 2.0 1.0
2 0.0 1.0 2.0 3.0
3 0.0 3.0 2.0 4.0
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0.0
1 3.0 4.0 NaN 1.0
2 NaN 1.0 NaN 3.0
3 NaN 3.0 NaN 4.0
        When filling using a DataFrame, replacement happens along
        the same column names and same indices.
>>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE"))
>>> df.fillna(df2)
A B C D
0 0.0 2.0 0.0 0.0
1 3.0 4.0 0.0 1.0
2 0.0 0.0 0.0 NaN
3 0.0 3.0 0.0 4.0
Note that column D is not affected since it is not present in df2.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
if not self._mgr.is_single_block and axis == 1:
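                # multi-block frames cannot be filled along axis=1 directly, so the
                #  frame is transposed, filled along axis=0, and transposed back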
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
return result
new_data = self._mgr.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
downcast=downcast,
)
else:
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
if not len(value):
# test_fillna_nonscalar
if inplace:
return None
return self.copy(deep=None)
from pandas import Series
value = Series(value)
value = value.reindex(self.index, copy=False)
value = value._values
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._mgr.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
if using_copy_on_write():
result = self.copy(deep=None)
else:
result = self if inplace else self.copy()
is_dict = isinstance(downcast, dict)
for k, v in value.items():
if k not in result:
continue
# error: Item "None" of "Optional[Dict[Any, Any]]" has no
# attribute "get"
downcast_k = (
downcast
if not is_dict
else downcast.get(k) # type: ignore[union-attr]
)
res_k = result[k].fillna(v, limit=limit, downcast=downcast_k)
if not inplace:
result[k] = res_k
else:
# We can write into our existing column(s) iff dtype
# was preserved.
if isinstance(res_k, ABCSeries):
# i.e. 'k' only shows up once in self.columns
if res_k.dtype == result[k].dtype:
result.loc[:, k] = res_k
else:
# Different dtype -> no way to do inplace.
result[k] = res_k
else:
# see test_fillna_dict_inplace_nonunique_columns
locs = result.columns.get_loc(k)
if isinstance(locs, slice):
locs = np.arange(self.shape[1])[locs]
elif (
isinstance(locs, np.ndarray) and locs.dtype.kind == "b"
):
locs = locs.nonzero()[0]
elif not (
isinstance(locs, np.ndarray) and locs.dtype.kind == "i"
):
# Should never be reached, but let's cover our bases
raise NotImplementedError(
"Unexpected get_loc result, please report a bug at "
"https://github.com/pandas-dev/pandas"
)
for i, loc in enumerate(locs):
res_loc = res_k.iloc[:, i]
target = self.iloc[:, loc]
if res_loc.dtype == target.dtype:
result.iloc[:, loc] = res_loc
else:
result.isetitem(loc, res_loc)
if inplace:
return self._update_inplace(result)
else:
return result
elif not is_list_like(value):
if axis == 1:
result = self.T.fillna(value=value, limit=limit).T
new_data = result
else:
new_data = self._mgr.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)._mgr
else:
raise ValueError(f"invalid fill value with a {type(value)}")
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="fillna")
    @overload
    def ffill(
self: NDFrameT,
*,
axis: None | Axis = ...,
inplace: Literal[False] = ...,
limit: None | int = ...,
downcast: dict | None = ...,
) -> NDFrameT:
...
    @overload
    def ffill(
self,
*,
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
downcast: dict | None = ...,
) -> None:
...
    @overload
    def ffill(
self: NDFrameT,
*,
axis: None | Axis = ...,
inplace: bool_t = ...,
limit: None | int = ...,
downcast: dict | None = ...,
) -> NDFrameT | None:
...
def ffill(
self: NDFrameT,
*,
axis: None | Axis = None,
inplace: bool_t = False,
limit: None | int = None,
downcast: dict | None = None,
) -> NDFrameT | None:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
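        Examples
        --------
        A minimal illustration of forward filling a Series:
        >>> ser = pd.Series([1, np.nan, 2, np.nan])
        >>> ser.ffill()
        0    1.0
        1    1.0
        2    2.0
        3    2.0
        dtype: float64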
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def pad(
self: NDFrameT,
*,
axis: None | Axis = None,
inplace: bool_t = False,
limit: None | int = None,
downcast: dict | None = None,
) -> NDFrameT | None:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
.. deprecated:: 2.0
{klass}.pad is deprecated. Use {klass}.ffill instead.
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
"""
warnings.warn(
"DataFrame.pad/Series.pad is deprecated. Use "
"DataFrame.ffill/Series.ffill instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)
    @overload
    def bfill(
self: NDFrameT,
*,
axis: None | Axis = ...,
inplace: Literal[False] = ...,
limit: None | int = ...,
downcast: dict | None = ...,
) -> NDFrameT:
...
    @overload
    def bfill(
self,
*,
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
downcast: dict | None = ...,
) -> None:
...
    @overload
    def bfill(
self: NDFrameT,
*,
axis: None | Axis = ...,
inplace: bool_t = ...,
limit: None | int = ...,
downcast: dict | None = ...,
) -> NDFrameT | None:
...
def bfill(
self: NDFrameT,
*,
axis: None | Axis = None,
inplace: bool_t = False,
limit: None | int = None,
downcast: dict | None = None,
) -> NDFrameT | None:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
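        Examples
        --------
        A minimal illustration of backward filling a Series:
        >>> ser = pd.Series([np.nan, 2, np.nan, 4])
        >>> ser.bfill()
        0    2.0
        1    2.0
        2    4.0
        3    4.0
        dtype: float64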
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def backfill(
self: NDFrameT,
*,
axis: None | Axis = None,
inplace: bool_t = False,
limit: None | int = None,
downcast: dict | None = None,
) -> NDFrameT | None:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
.. deprecated:: 2.0
{klass}.backfill is deprecated. Use {klass}.bfill instead.
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
"""
warnings.warn(
"DataFrame.backfill/Series.backfill is deprecated. Use "
"DataFrame.bfill/Series.bfill instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)
    @overload
    def replace(
self: NDFrameT,
to_replace=...,
value=...,
*,
inplace: Literal[False] = ...,
limit: int | None = ...,
regex: bool_t = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> NDFrameT:
...
    @overload
    def replace(
self,
to_replace=...,
value=...,
*,
inplace: Literal[True],
limit: int | None = ...,
regex: bool_t = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> None:
...
    @overload
    def replace(
self: NDFrameT,
to_replace=...,
value=...,
*,
inplace: bool_t = ...,
limit: int | None = ...,
regex: bool_t = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
) -> NDFrameT | None:
...
    @doc(
        _shared_docs["replace"],
klass=_shared_doc_kwargs["klass"],
inplace=_shared_doc_kwargs["inplace"],
replace_iloc=_shared_doc_kwargs["replace_iloc"],
)
def replace(
self: NDFrameT,
to_replace=None,
value=lib.no_default,
*,
inplace: bool_t = False,
limit: int | None = None,
regex: bool_t = False,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
) -> NDFrameT | None:
if not (
is_scalar(to_replace)
or is_re_compilable(to_replace)
or is_list_like(to_replace)
):
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"{repr(type(to_replace).__name__)}"
)
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")
if value is lib.no_default or method is not lib.no_default:
# GH#36984 if the user explicitly passes value=None we want to
# respect that. We have the corner case where the user explicitly
# passes value=None *and* a method, which we interpret as meaning
# they want the (documented) default behavior.
if method is lib.no_default:
# TODO: get this to show up as the default in the docs?
method = "pad"
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
# TODO: Consider copy-on-write for non-replaced columns's here
if isinstance(self, ABCDataFrame):
from pandas import Series
result = self.apply(
Series._replace_single,
args=(to_replace, method, inplace, limit),
)
if inplace:
return None
return result
return self._replace_single(to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
if items:
keys, values = zip(*items)
else:
keys, values = ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
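                # e.g. to_replace={"A": {0: 100}} is split below into
                #  to_replace={"A": [0]} and value={"A": [100]}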
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
if inplace:
return None
return self.copy(deep=None)
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
# Note: Checking below for `in foo.keys()` instead of
# `in foo` is needed for when we have a Series and not dict
mapping = {
col: (to_replace[col], value[col])
for col in to_replace.keys()
if col in value.keys() and col in self
}
return self._replace_columnwise(mapping, inplace, regex)
# {'A': NA} -> 0
elif not is_list_like(value):
# Operate column-wise
if self.ndim == 1:
raise ValueError(
"Series.replace cannot use dict-like to_replace "
"and non-None value"
)
mapping = {
col: (to_rep, value) for col, to_rep in to_replace.items()
}
return self._replace_columnwise(mapping, inplace, regex)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace):
if not is_list_like(value):
# e.g. to_replace = [NA, ''] and value is 0,
# so we replace NA with 0 and then replace '' with 0
value = [value] * len(to_replace)
# e.g. we have to_replace = [NA, ''] and value = [0, 'missing']
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
new_data = self._mgr.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
# Operate column-wise
if self.ndim == 1:
raise ValueError(
"Series.replace cannot use dict-value and "
"non-None to_replace"
)
mapping = {col: (to_replace, val) for col, val in value.items()}
return self._replace_columnwise(mapping, inplace, regex)
elif not is_list_like(value): # NA -> 0
regex = should_use_regex(regex, to_replace)
if regex:
new_data = self._mgr.replace_regex(
to_replace=to_replace,
value=value,
inplace=inplace,
)
else:
new_data = self._mgr.replace(
to_replace=to_replace, value=value, inplace=inplace
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="replace")
def interpolate(
self: NDFrameT,
method: str = "linear",
*,
axis: Axis = 0,
limit: int | None = None,
inplace: bool_t = False,
limit_direction: str | None = None,
limit_area: str | None = None,
downcast: str | None = None,
**kwargs,
) -> NDFrameT | None:
"""
Fill NaN values using an interpolation method.
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`, whereas 'spline' is passed to
`scipy.interpolate.UnivariateSpline`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
              ``df.interpolate(method='polynomial', order=5)``. Note that
              the `slinear` method in pandas refers to the SciPy first-order `spline`
              rather than to pandas' first-order `spline`.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
'cubicspline': Wrappers around the SciPy interpolation methods of
similar names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
        axis : {{0 or 'index', 1 or 'columns', None}}, default 0
Axis to interpolate along. For `Series` this parameter is unused
and defaults to 0.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {{'forward', 'backward', 'both'}}, Optional
Consecutive NaNs will be filled in this direction.
If limit is specified:
            * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
            * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
              'backward'.
If 'limit' is not specified:
* If 'method' is 'backfill' or 'bfill', the default is 'backward'
* else the default is 'forward'
.. versionchanged:: 1.1.0
raises ValueError if `limit_direction` is 'forward' or 'both' and
method is 'backfill' or 'bfill'.
raises ValueError if `limit_direction` is 'backward' or 'both' and
method is 'pad' or 'ffill'.
limit_area : {{`None`, 'inside', 'outside'}}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
``**kwargs`` : optional
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame or None
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values or None if ``inplace=True``.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
fillna_methods = ["ffill", "bfill", "pad", "backfill"]
should_transpose = axis == 1 and method not in fillna_methods
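        # axis=1 interpolation (other than the fillna-style methods) is handled by
        #  transposing, interpolating along axis=0, and transposing back at the end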
obj = self.T if should_transpose else self
if obj.empty:
return self.copy()
if method not in fillna_methods:
axis = self._info_axis_number
if isinstance(obj.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
# Set `limit_direction` depending on `method`
if limit_direction is None:
limit_direction = (
"backward" if method in ("backfill", "bfill") else "forward"
)
else:
if method in ("pad", "ffill") and limit_direction != "forward":
raise ValueError(
f"`limit_direction` must be 'forward' for method `{method}`"
)
if method in ("backfill", "bfill") and limit_direction != "backward":
raise ValueError(
f"`limit_direction` must be 'backward' for method `{method}`"
)
if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = Index(np.arange(len(obj.index)))
else:
index = obj.index
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index.dtype)
or is_datetime64_any_dtype(index.dtype)
or is_timedelta64_dtype(index.dtype)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
new_data = obj._mgr.interpolate(
method=method,
axis=axis,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
result = self._constructor(new_data)
if should_transpose:
result = result.T
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="interpolate")
# ----------------------------------------------------------------------
    # Timeseries Methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
        In case of a :class:`~pandas.DataFrame`, the last row without NaN
        considering only the subset of columns (if not `None`) is taken.
        If there is no good value, NaN is returned for a Series, or
        a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30 NaN
2018-02-27 09:04:30 40 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic_increasing:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
return self._constructor_sliced(
index=self.columns, name=where, dtype=np.float64
)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(axis=1)
if nulls.all():
if is_series:
self = cast("Series", self)
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
self = cast("DataFrame", self)
return self._constructor(np.nan, index=where, columns=self.columns)
else:
self = cast("DataFrame", self)
return self._constructor_sliced(
np.nan, index=self.columns, name=where[0]
)
locs = self.index.asof_locs(where, ~(nulls._values))
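        # ``asof_locs`` gives, for each label in ``where``, the position of the last
        #  row (restricted to non-NA rows via the mask) at or before that label,
        #  or -1 when there is no such row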
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
if missing.any():
# GH#16063 only do this setting when necessary, otherwise
# we'd cast e.g. bools to floats
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
def isna(self: NDFrameT) -> NDFrameT:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
{klass}
Mask of bool values for each element in {klass} that
indicates whether an element is an NA value.
See Also
--------
{klass}.isnull : Alias of isna.
{klass}.notna : Boolean inverse of isna.
{klass}.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
return isna(self).__finalize__(self, method="isna")
def isnull(self: NDFrameT) -> NDFrameT:
return isna(self).__finalize__(self, method="isnull")
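    # ``isnull`` is an alias of ``isna`` (and ``notnull``, further below, of
    #  ``notna``); they differ only in the method name recorded via ``__finalize__``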
def notna(self: NDFrameT) -> NDFrameT:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
{klass}
Mask of bool values for each element in {klass} that
indicates whether an element is not an NA value.
See Also
--------
{klass}.notnull : Alias of notna.
{klass}.isna : Boolean inverse of notna.
{klass}.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
return notna(self).__finalize__(self, method="notna")
def notnull(self: NDFrameT) -> NDFrameT:
return notna(self).__finalize__(self, method="notnull")
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self._values)
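        # remember which positions were NA up front: the ``where`` calls below would
        #  replace them with the bound (comparisons against NaN are False), so they
        #  are reset to NaN afterwards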
with np.errstate(all="ignore"):
if upper is not None:
subset = self <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
return self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = align_method_FRAME(self, threshold, axis, flex=None)[1]
# GH 40420
# Treat missing thresholds as no bounds, not clipping the values
if is_list_like(threshold):
fill_value = np.inf if method.__name__ == "le" else -np.inf
threshold_inf = threshold.fillna(fill_value)
else:
threshold_inf = threshold
subset = method(threshold_inf, axis=axis) | isna(self)
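        # NA entries of ``self`` are included in ``subset`` so existing missing
        #  values are kept as-is rather than being replaced by the threshold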
# GH 40420
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: NDFrameT,
lower=None,
upper=None,
*,
axis: Axis | None = None,
inplace: bool_t = False,
**kwargs,
) -> NDFrameT | None:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array-like, default None
Minimum threshold value. All values below this
threshold will be set to it. A missing
threshold (e.g `NA`) will not clip the value.
upper : float or array-like, default None
Maximum threshold value. All values above this
threshold will be set to it. A missing
threshold (e.g `NA`) will not clip the value.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
Align object with lower and upper along the given axis.
For `Series` this parameter is unused and defaults to `None`.
inplace : bool, default False
Whether to perform the operation in place on the data.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame or None
Same type as calling object with the values outside the
clip boundaries replaced or None if ``inplace=True``.
See Also
--------
Series.clip : Trim values at input threshold in series.
DataFrame.clip : Trim values at input threshold in dataframe.
numpy.clip : Clip (limit) the values in an array.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
Clips using specific lower threshold per column element, with missing values:
>>> t = pd.Series([2, -4, np.NaN, 6, 3])
>>> t
0 2.0
1 -4.0
2 NaN
3 6.0
4 3.0
dtype: float64
>>> df.clip(t, axis=0)
col_0 col_1
0 9 2
1 -3 -4
2 0 6
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, (), kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
isna_lower = isna(lower)
if not is_list_like(lower):
if np.any(isna_lower):
lower = None
elif np.all(isna_lower):
lower = None
isna_upper = isna(upper)
if not is_list_like(upper):
if np.any(isna_upper):
upper = None
elif np.all(isna_upper):
upper = None
# GH 2747 (arguments were reversed)
if (
lower is not None
and upper is not None
and is_scalar(lower)
and is_scalar(upper)
):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
def asfreq(
self: NDFrameT,
freq: Frequency,
method: FillnaOptions | None = None,
how: str | None = None,
normalize: bool_t = False,
fill_value: Hashable = None,
) -> NDFrameT:
"""
Convert time series to specified frequency.
Returns the original data conformed to a new index with the specified
frequency.
If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index
is the result of transforming the original index with
:meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index
will map one-to-one to the new index).
Otherwise, the new index will be equivalent to ``pd.date_range(start, end,
freq=freq)`` where ``start`` and ``end`` are, respectively, the first and
last entries in the original index (see :func:`pandas.date_range`). The
values corresponding to any timesteps in the new index which were not present
in the original index will be null (``NaN``), unless a method for filling
such unknowns is provided (see the ``method`` parameter below).
The :meth:`resample` method is more appropriate if an operation on each group of
timesteps (such as an aggregate) is necessary to represent the data at the new
frequency.
Parameters
----------
freq : DateOffset or str
Frequency DateOffset or string.
method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {{'start', 'end'}}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
{klass}
{klass} object reindexed to the specified frequency.
See Also
--------
reindex : Conform DataFrame to new index with optional filling logic.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({{'s': series}})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None
) -> NDFrameT:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
The values to select.
axis : {0 or 'index', 1 or 'columns'}, default 0
For `Series` this parameter is unused and defaults to 0.
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_at_time(time, asof=asof)
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: NDFrameT,
start_time,
end_time,
inclusive: IntervalClosedType = "both",
axis: Axis | None = None,
) -> NDFrameT:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
inclusive : {"both", "neither", "left", "right"}, default "both"
Include boundaries; whether to set each bound as closed or open.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
For `Series` this parameter is unused and defaults to 0.
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=left_inclusive,
include_end=right_inclusive,
)
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis: Axis = 0,
closed: str | None = None,
label: str | None = None,
convention: str = "start",
kind: str | None = None,
on: Level = None,
level: Level = None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
group_keys: bool_t = False,
) -> Resampler:
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time series.
The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`,
or `TimedeltaIndex`), or the caller must pass the label of a datetime-like
series/index to the ``on``/``level`` keyword parameter.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
            Which axis to use for up- or down-sampling. For `Series` this parameter
            is unused and defaults to 0. The index of the chosen axis must be a
            `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {{'right', 'left'}}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {{'right', 'left'}}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {{'start', 'end', 's', 'e'}}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {{'timestamp', 'period'}}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DatetimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
origin : Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If string, must be one of the following:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
.. versionadded:: 1.1.0
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
.. versionadded:: 1.3.0
offset : Timedelta or str, default is None
An offset timedelta added to the origin.
.. versionadded:: 1.1.0
group_keys : bool, default False
Whether to include the group keys in the result index when using
``.apply()`` on the resampled object.
.. versionadded:: 1.5.0
Not specifying ``group_keys`` will retain values-dependent behavior
from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes
<whatsnew_150.enhancements.resample_group_keys>` for examples).
.. versionchanged:: 2.0.0
``group_keys`` now defaults to ``False``.
Returns
-------
pandas.core.Resampler
:class:`~pandas.core.Resampler` object.
See Also
--------
Series.resample : Resample a Series.
DataFrame.resample : Resample a DataFrame.
groupby : Group {klass} by mapping, function, label, or list of labels.
asfreq : Reindex a {klass} with the given frequency without grouping.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``ffill`` method.
>>> series.resample('30S').ffill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(arraylike):
... return np.sum(arraylike) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
>>> df2 = pd.DataFrame(
... d2,
... index=pd.MultiIndex.from_product(
... [days, ['morning', 'afternoon']]
... )
... )
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
If you want to adjust the start of the bins based on a fixed timestamp:
>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
2000-10-01 23:37:00 3
2000-10-01 23:44:00 6
2000-10-01 23:51:00 9
2000-10-01 23:58:00 12
2000-10-02 00:05:00 15
2000-10-02 00:12:00 18
2000-10-02 00:19:00 21
2000-10-02 00:26:00 24
Freq: 7T, dtype: int64
>>> ts.resample('17min').sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
2000-10-02 00:05:00 54
2000-10-02 00:22:00 24
Freq: 17T, dtype: int64
>>> ts.resample('17min', origin='epoch').sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
2000-10-02 00:09:00 39
2000-10-02 00:26:00 24
Freq: 17T, dtype: int64
>>> ts.resample('17min', origin='2000-01-01').sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
2000-10-02 00:15:00 45
Freq: 17T, dtype: int64
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
>>> ts.resample('17min', origin='start').sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
>>> ts.resample('17min', offset='23h30min').sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
If you want to take the largest Timestamp as the end of the bins:
>>> ts.resample('17min', origin='end').sum()
2000-10-01 23:35:00 0
2000-10-01 23:52:00 18
2000-10-02 00:09:00 27
2000-10-02 00:26:00 63
Freq: 17T, dtype: int64
In contrast with the `start_day`, you can use `end_day` to take the ceiling
midnight of the largest Timestamp as the end of the bins and drop the bins
not containing data:
>>> ts.resample('17min', origin='end_day').sum()
2000-10-01 23:38:00 3
2000-10-01 23:55:00 15
2000-10-02 00:12:00 45
2000-10-02 00:29:00 45
Freq: 17T, dtype: int64
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
cast("Series | DataFrame", self),
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
convention=convention,
key=on,
level=level,
origin=origin,
offset=offset,
group_keys=group_keys,
)
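# Illustrative sketch (not part of the library code): ``group_keys`` only
# matters when ``.apply()`` returns an object indexed like each bin. With
# ``group_keys=True`` the bin labels are added as an extra index level; with
# the default ``False`` they are not. Exact output may vary across versions.
#
#   >>> s = pd.Series(range(4), index=pd.date_range("2000-01-01", periods=4, freq="D"))
#   >>> s.resample("2D", group_keys=False).apply(lambda x: x)   # original index
#   >>> s.resample("2D", group_keys=True).apply(lambda x: x)    # bin label prepended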
def first(self: NDFrameT, offset) -> NDFrameT:
"""
Select initial periods of time series data based on a date offset.
When a DataFrame has dates as its index, this function can
select the first few rows based on a date offset.
Parameters
----------
offset : str, DateOffset or dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'1M' will display all the rows having their index within the first month.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that the data for the first 3 calendar days was returned, not the
first 3 days observed in the dataset, and therefore data for 2018-04-13
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self.copy(deep=False)
offset = to_offset(offset)
if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]):
# GH#29623 if first value is end of period, remove offset with n = 1
# before adding the real offset
end_date = end = self.index[0] - offset.base + offset
else:
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if isinstance(offset, Tick) and end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self: NDFrameT, offset) -> NDFrameT:
"""
Select final periods of time series data based on a date offset.
For a DataFrame with a sorted DatetimeIndex, this function
selects the last few rows based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days was returned, not the
last 3 observed days in the dataset, and therefore data for 2018-04-11
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self.copy(deep=False)
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
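# Illustrative sketch (not part of the library code): because ``last`` uses
# ``searchsorted(..., side="right")`` on ``index[-1] - offset``, a row that
# falls exactly ``offset`` before the final timestamp is excluded.
#
#   >>> i = pd.date_range("2018-04-09", periods=4, freq="2D")
#   >>> ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
#   >>> ts.last("2D")   # only 2018-04-15; 2018-04-13 is exactly 2 days earlier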
def rank(
self: NDFrameT,
axis: Axis = 0,
method: str = "average",
numeric_only: bool_t = False,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> NDFrameT:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
For `Series` this parameter is unused and defaults to 0.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, default False
For DataFrame objects, rank only numeric columns if set to True.
.. versionchanged:: 2.0.0
The default value of ``numeric_only`` is now ``False``.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign lowest rank to NaN values
* bottom: assign highest rank to NaN values
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.DataFrameGroupBy.rank : Rank of values within each group.
core.groupby.SeriesGroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
Ties are assigned the mean of the ranks (by default) for the group.
>>> s = pd.Series(range(5), index=list("abcde"))
>>> s["d"] = s["b"]
>>> s.rank()
a 1.0
b 2.5
c 4.0
d 2.5
e 5.0
dtype: float64
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis_int = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
if data.ndim == 2:
# i.e. DataFrame, we cast to ndarray
values = data.values
else:
# i.e. Series, can dispatch to EA
values = data._values
if isinstance(values, ExtensionArray):
ranks = values._rank(
axis=axis_int,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
else:
ranks = algos.rank(
values,
axis=axis_int,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks_obj = self._constructor(ranks, **data._construct_axes_dict())
return ranks_obj.__finalize__(self, method="rank")
if numeric_only:
if self.ndim == 1 and not is_numeric_dtype(self.dtype):
# GH#47500
raise TypeError(
"Series.rank does not allow numeric_only=True with "
"non-numeric dtype."
)
data = self._get_numeric_data()
else:
data = self
return ranker(data)
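# Illustrative sketch (not part of the library code): how the 'min' and
# 'dense' tie-breaking methods differ for the same data.
#
#   >>> s = pd.Series([7, 7, 9, 5])
#   >>> s.rank(method="min")     # 5 -> 1.0, both 7s -> 2.0, 9 -> 4.0
#   >>> s.rank(method="dense")   # 5 -> 1.0, both 7s -> 2.0, 9 -> 3.0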
def compare(
self,
other,
align_axis: Axis = 1,
keep_shape: bool_t = False,
keep_equal: bool_t = False,
result_names: Suffixes = ("self", "other"),
):
if type(self) is not type(other):
cls_self, cls_other = type(self).__name__, type(other).__name__
raise TypeError(
f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'"
)
mask = ~((self == other) | (self.isna() & other.isna()))
mask.fillna(True, inplace=True)
if not keep_equal:
self = self.where(mask)
other = other.where(mask)
if not keep_shape:
if isinstance(self, ABCDataFrame):
cmask = mask.any()
rmask = mask.any(axis=1)
self = self.loc[rmask, cmask]
other = other.loc[rmask, cmask]
else:
self = self[mask]
other = other[mask]
if not isinstance(result_names, tuple):
raise TypeError(
f"Passing 'result_names' as a {type(result_names)} is not "
"supported. Provide 'result_names' as a tuple instead."
)
if align_axis in (1, "columns"): # This is needed for Series
axis = 1
else:
axis = self._get_axis_number(align_axis)
diff = concat([self, other], axis=axis, keys=result_names)
if axis >= self.ndim:
# No need to reorganize data if stacking on new axis
# This currently applies for stacking two Series on columns
return diff
ax = diff._get_axis(axis)
ax_names = np.array(ax.names)
# set index names to positions to avoid confusion
ax.names = np.arange(len(ax_names))
# bring self-other to inner level
order = list(range(1, ax.nlevels)) + [0]
if isinstance(diff, ABCDataFrame):
diff = diff.reorder_levels(order, axis=axis)
else:
diff = diff.reorder_levels(order)
# restore the index names in order
diff._get_axis(axis=axis).names = ax_names[order]
# reorder axis to keep things organized
indices = (
np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten()
)
diff = diff.take(indices, axis=axis)
return diff
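# Illustrative sketch (not part of the library code): ``compare`` keeps only
# the rows/columns that differ and stacks the two inputs under the
# ``result_names`` labels ('self'/'other' by default).
#
#   >>> df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
#   >>> df2 = pd.DataFrame({"a": [1, 2, 9], "b": [4, 5, 6]})
#   >>> df1.compare(df2)   # one row (index 2), columns ('a', 'self') and ('a', 'other')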
def align(
self: NDFrameT,
other: NDFrameT,
join: AlignJoin = "outer",
axis: Axis | None = None,
level: Level = None,
copy: bool_t | None = None,
fill_value: Hashable = None,
method: FillnaOptions | None = None,
limit: int | None = None,
fill_axis: Axis = 0,
broadcast_axis: Axis | None = None,
) -> NDFrameT:
"""
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : {axes_single_arg}, default 0
Axis along which ``method`` and ``limit`` are applied when filling.
broadcast_axis : {axes_single_arg}, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
tuple of ({klass}, type of other)
Aligned objects.
Examples
--------
>>> df = pd.DataFrame(
... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2]
... )
>>> other = pd.DataFrame(
... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]],
... columns=["A", "B", "C", "D"],
... index=[2, 3, 4],
... )
>>> df
D B E A
1 1 2 3 4
2 6 7 8 9
>>> other
A B C D
2 10 20 30 40
3 60 70 80 90
4 600 700 800 900
Align on columns:
>>> left, right = df.align(other, join="outer", axis=1)
>>> left
A B C D E
1 4 2 NaN 1 3
2 9 7 NaN 6 8
>>> right
A B C D E
2 10 20 30 40 NaN
3 60 70 80 90 NaN
4 600 700 800 900 NaN
We can also align on the index:
>>> left, right = df.align(other, join="outer", axis=0)
>>> left
D B E A
1 1.0 2.0 3.0 4.0
2 6.0 7.0 8.0 9.0
3 NaN NaN NaN NaN
4 NaN NaN NaN NaN
>>> right
A B C D
1 NaN NaN NaN NaN
2 10.0 20.0 30.0 40.0
3 60.0 70.0 80.0 90.0
4 600.0 700.0 800.0 900.0
Finally, the default `axis=None` will align on both index and columns:
>>> left, right = df.align(other, join="outer", axis=None)
>>> left
A B C D E
1 4.0 2.0 NaN 1.0 3.0
2 9.0 7.0 NaN 6.0 8.0
3 NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN
>>> right
A B C D E
1 NaN NaN NaN NaN NaN
2 10.0 20.0 30.0 40.0 NaN
3 60.0 70.0 80.0 90.0 NaN
4 600.0 700.0 800.0 900.0 NaN
"""
method = clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join: AlignJoin = "outer",
axis: Axis | None = None,
level=None,
copy: bool_t | None = None,
fill_value=None,
method=None,
limit=None,
fill_axis: Axis = 0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if (axis is None or axis == 0) and not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if (
(axis is None or axis == 1)
and not is_series
and not self.columns.equals(other.columns)
):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
_left = left.fillna(method=method, axis=fill_axis, limit=limit)
assert _left is not None # needed for mypy
left = _left
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
left, right = _align_as_utc(left, right, join_index)
return (
left.__finalize__(self),
right.__finalize__(other),
)
def _align_series(
self,
other,
join: AlignJoin = "outer",
axis: Axis | None = None,
level=None,
copy: bool_t | None = None,
fill_value=None,
method=None,
limit=None,
fill_axis: Axis = 0,
):
is_series = isinstance(self, ABCSeries)
if copy and using_copy_on_write():
copy = False
if (not is_series and axis is None) or axis not in [None, 0, 1]:
raise ValueError("Must specify axis=0 or 1")
if is_series and axis == 1:
raise ValueError("cannot align series to a series other than axis 0")
# series/series compat, other must always be a Series
if not axis:
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if is_series:
left = self._reindex_indexer(join_index, lidx, copy)
elif lidx is None or join_index is None:
left = self.copy(deep=copy)
else:
left = self._constructor(
self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy)
)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._mgr
join_index = self.axes[1]
lidx, ridx = None, None
if not join_index.equals(other.index):
join_index, lidx, ridx = join_index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
bm_axis = self._get_block_manager_axis(1)
fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis)
if copy and fdata is self._mgr:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other.copy(deep=copy)
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
left, right = _align_as_utc(left, right, join_index)
return (
left.__finalize__(self),
right.__finalize__(other),
)
def _where(
self,
cond,
other=lib.no_default,
inplace: bool_t = False,
axis: Axis | None = None,
level=None,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if axis is not None:
axis = self._get_axis_number(axis)
# align the cond to same shape as myself
cond = common.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
# CoW: Make sure reference is not kept alive
cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0]
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict(), copy=False)
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not cond.empty:
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
else:
for _dt in cond.dtypes:
if not is_bool_dtype(_dt):
raise ValueError(msg.format(dtype=_dt))
else:
# GH#21947 we have an empty DataFrame/Series, could be object-dtype
cond = cond.astype(bool)
cond = -cond if inplace else cond
cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False)
# try to align with other
if isinstance(other, NDFrame):
# align with me
if other.ndim <= self.ndim:
# CoW: Make sure reference is not kept alive
other = self.align(
other,
join="left",
axis=axis,
level=level,
fill_value=None,
copy=False,
)[1]
# if we are NOT aligned, raise as we cannot where index
if axis is None and not other._indexed_same(self):
raise InvalidIndexError
if other.ndim < self.ndim:
# TODO(EA2D): avoid object-dtype cast in EA case GH#38729
other = other._values
if axis == 0:
other = np.reshape(other, (-1, 1))
elif axis == 1:
other = np.reshape(other, (1, -1))
other = np.broadcast_to(other, self.shape)
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
elif not isinstance(other, (MultiIndex, NDFrame)):
# mainly just catching Index here
other = extract_array(other, extract_numpy=True)
if isinstance(other, (np.ndarray, ExtensionArray)):
if other.shape != self.shape:
if self.ndim != 1:
# In the ndim == 1 case we may have
# other length 1, which we treat as scalar (GH#2745, GH#4192)
# or len(other) == icond.sum(), which we treat like
# __setitem__ (GH#3235)
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(
other, **self._construct_axes_dict(), copy=False
)
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._mgr.putmask(mask=cond, new=other, align=align)
result = self._constructor(new_data)
return self._update_inplace(result)
else:
new_data = self._mgr.where(
other=other,
cond=cond,
align=align,
)
result = self._constructor(new_data)
return result.__finalize__(self)
@overload
def where(
self: NDFrameT,
cond,
other=...,
*,
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
) -> NDFrameT:
...
@overload
def where(
self,
cond,
other=...,
*,
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
) -> None:
...
@overload
def where(
self: NDFrameT,
cond,
other=...,
*,
inplace: bool_t = ...,
axis: Axis | None = ...,
level: Level = ...,
) -> NDFrameT | None:
...
@doc(
klass=_shared_doc_kwargs["klass"],
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
def where(
self: NDFrameT,
cond,
other=np.nan,
*,
inplace: bool_t = False,
axis: Axis | None = None,
level: Level = None,
) -> NDFrameT | None:
"""
Replace values where the condition is {cond_rev}.
Parameters
----------
cond : bool {klass}, array-like, or callable
Where `cond` is {cond}, keep the original value. Where
{cond_rev}, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the {klass} and
should return boolean {klass} or array. The callable must
not change input {klass} (though pandas doesn't check it).
other : scalar, {klass}, or callable
Entries where `cond` is {cond_rev} are replaced with
corresponding value from `other`.
If other is callable, it is computed on the {klass} and
should return scalar or {klass}. The callable must not
change input {klass} (though pandas doesn't check it).
If not specified, entries will be filled with the corresponding
NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension
dtypes).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed. For `Series` this parameter is
unused and defaults to 0.
level : int, default None
Alignment level if needed.
Returns
-------
Same type as caller or None if ``inplace=True``.
See Also
--------
:func:`DataFrame.{name_other}` : Return an object of same shape as
self.
Notes
-----
The {name} method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``{cond}`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used. If the axis of ``other`` does not align with axis of
``cond`` {klass}, the misaligned index positions will be filled with
{cond_rev}.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``{name}`` documentation in
:ref:`indexing <indexing.where_mask>`.
The dtype of the object takes precedence. The fill value is cast to
the object's dtype if this can be done losslessly.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s = pd.Series(range(5))
>>> t = pd.Series([True, False])
>>> s.where(t, 99)
0 0
1 99
2 99
3 99
4 99
dtype: int64
>>> s.mask(t, 99)
0 99
1 1
2 99
3 99
4 99
dtype: int64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> s.mask(s > 1, 10)
0 0
1 1
2 10
3 10
4 10
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df % 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
other = common.apply_if_callable(other, self)
return self._where(cond, other, inplace, axis, level)
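# Illustrative sketch (not part of the library code): both ``cond`` and
# ``other`` may be callables, each evaluated on the calling object.
#
#   >>> s = pd.Series([1, 2, 3, 4])
#   >>> s.where(lambda x: x > 2, lambda x: -x)   # 1 -> -1, 2 -> -2, 3 and 4 kept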
@overload
def mask(
self: NDFrameT,
cond,
other=...,
*,
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
) -> NDFrameT:
...
@overload
def mask(
self,
cond,
other=...,
*,
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
) -> None:
...
@overload
def mask(
self: NDFrameT,
cond,
other=...,
*,
inplace: bool_t = ...,
axis: Axis | None = ...,
level: Level = ...,
) -> NDFrameT | None:
...
@doc(
where,
klass=_shared_doc_kwargs["klass"],
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
def mask(
self: NDFrameT,
cond,
other=lib.no_default,
*,
inplace: bool_t = False,
axis: Axis | None = None,
level: Level = None,
) -> NDFrameT | None:
inplace = validate_bool_kwarg(inplace, "inplace")
cond = common.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
)
def shift(
self: NDFrameT,
periods: int = 1,
freq=None,
axis: Axis = 0,
fill_value: Hashable = None,
) -> NDFrameT:
"""
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`. `freq` can be inferred
when specified as "infer" as long as either freq or inferred_freq
attribute is set in the index.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
If `freq` is specified as "infer" then it will be inferred from
the freq or inferred_freq attributes of the index. If neither of
those attributes exist, a ValueError is thrown.
axis : {{0 or 'index', 1 or 'columns', None}}, default 0
Shift direction. For `Series` this parameter is unused and defaults to 0.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 1.1.0
Returns
-------
{klass}
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
Examples
--------
>>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
... "Col2": [13, 23, 18, 33, 48],
... "Col3": [17, 27, 22, 37, 52]}},
... index=pd.date_range("2020-01-01", "2020-01-05"))
>>> df
Col1 Col2 Col3
2020-01-01 10 13 17
2020-01-02 20 23 27
2020-01-03 15 18 22
2020-01-04 30 33 37
2020-01-05 45 48 52
>>> df.shift(periods=3)
Col1 Col2 Col3
2020-01-01 NaN NaN NaN
2020-01-02 NaN NaN NaN
2020-01-03 NaN NaN NaN
2020-01-04 10.0 13.0 17.0
2020-01-05 20.0 23.0 27.0
>>> df.shift(periods=1, axis="columns")
Col1 Col2 Col3
2020-01-01 NaN 10 13
2020-01-02 NaN 20 23
2020-01-03 NaN 15 18
2020-01-04 NaN 30 33
2020-01-05 NaN 45 48
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
2020-01-01 0 0 0
2020-01-02 0 0 0
2020-01-03 0 0 0
2020-01-04 10 13 17
2020-01-05 20 23 27
>>> df.shift(periods=3, freq="D")
Col1 Col2 Col3
2020-01-04 10 13 17
2020-01-05 20 23 27
2020-01-06 15 18 22
2020-01-07 30 33 37
2020-01-08 45 48 52
>>> df.shift(periods=3, freq="infer")
Col1 Col2 Col3
2020-01-04 10 13 17
2020-01-05 20 23 27
2020-01-06 15 18 22
2020-01-07 30 33 37
2020-01-08 45 48 52
"""
if periods == 0:
return self.copy(deep=None)
if freq is None:
# when freq is None, data is shifted, index is not
axis = self._get_axis_number(axis)
new_data = self._mgr.shift(
periods=periods, axis=axis, fill_value=fill_value
)
return self._constructor(new_data).__finalize__(self, method="shift")
# when freq is given, index is shifted, data is not
index = self._get_axis(axis)
if freq == "infer":
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not set in the index hence cannot be inferred"
raise ValueError(msg)
elif isinstance(freq, str):
freq = to_offset(freq)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq != orig_freq:
assert orig_freq is not None # for mypy
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
new_ax = index.shift(periods)
else:
new_ax = index.shift(periods, freq)
result = self.set_axis(new_ax, axis=axis)
return result.__finalize__(self, method="shift")
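# Illustrative sketch (not part of the library code): for extension dtypes the
# newly introduced missing values use ``self.dtype.na_value`` (``pd.NA`` here),
# so the dtype is preserved instead of upcasting to float.
#
#   >>> s = pd.Series([1, 2, 3], dtype="Int64")
#   >>> s.shift(1)   # <NA>, 1, 2 -- still Int64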
def truncate(
self: NDFrameT,
before=None,
after=None,
axis: Axis | None = None,
copy: bool_t | None = None,
) -> NDFrameT:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
For `Series` this parameter is unused and defaults to 0.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax._is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None and before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1:
before, after = after, before
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
result = result.copy(deep=copy and not using_copy_on_write())
return result
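# Illustrative sketch (not part of the library code): for a monotonically
# decreasing index, ``before`` and ``after`` are swapped internally (see the
# check above), so the call reads the same as for an increasing index.
#
#   >>> s = pd.Series(range(5), index=[5, 4, 3, 2, 1])
#   >>> s.truncate(before=2, after=4)   # keeps the rows labelled 4, 3, 2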
def tz_convert(
self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None
) -> NDFrameT:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object or None
Target time zone. Passing ``None`` will convert to
UTC and remove the timezone information.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
The axis to convert.
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
{klass}
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
Examples
--------
Change to another time zone:
>>> s = pd.Series(
... [1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']),
... )
>>> s.tz_convert('Asia/Shanghai')
2018-09-15 07:30:00+08:00 1
dtype: int64
Pass None to convert to UTC and get a tz-naive index:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
>>> s.tz_convert(None)
2018-09-14 23:30:00 1
dtype: int64
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self.copy(deep=copy and not using_copy_on_write())
result = result.set_axis(ax, axis=axis, copy=False)
return result.__finalize__(self, method="tz_convert")
def tz_localize(
self: NDFrameT,
tz,
axis: Axis = 0,
level=None,
copy: bool_t | None = None,
ambiguous: TimeAmbiguous = "raise",
nonexistent: TimeNonexistent = "raise",
) -> NDFrameT:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo or None
Time zone to localize. Passing ``None`` will remove the
time zone information and preserve local time.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
The axis to localize.
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
Returns
-------
{klass}
Same type as the input.
Raises
------
TypeError
If the index is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series(
... [1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']),
... )
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Pass None to convert to tz-naive index and preserve local time:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
>>> s.tz_localize(None)
2018-09-15 01:30:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backward with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, dt.timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self.copy(deep=copy and not using_copy_on_write())
result = result.set_axis(ax, axis=axis, copy=False)
return result.__finalize__(self, method="tz_localize")
# ----------------------------------------------------------------------
# Numeric Methods
def describe(
self: NDFrameT,
percentiles=None,
include=None,
exclude=None,
) -> NDFrameT:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or DataFrame provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
mean 2006-09-01 08:00:00
min 2000-01-01 00:00:00
25% 2004-12-31 12:00:00
50% 2010-01-01 00:00:00
75% 2010-01-01 00:00:00
max 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all') # doctest: +SKIP
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN a
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[object]) # doctest: +SKIP
object
count 3
unique 3
top a
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top d
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number]) # doctest: +SKIP
categorical object
count 3 3
unique 3 3
top f a
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[object]) # doctest: +SKIP
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
return describe_ndframe(
obj=self,
include=include,
exclude=exclude,
percentiles=percentiles,
)
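# Illustrative sketch (not part of the library code): custom ``percentiles``
# replace the default 25%/75% rows; the median (50%) is always included.
#
#   >>> s = pd.Series([1, 2, 3, 4])
#   >>> s.describe(percentiles=[0.1, 0.9])
#   >>> # result index: count, mean, std, min, 10%, 50%, 90%, max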
def pct_change(
self: NDFrameT,
periods: int = 1,
fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad",
limit=None,
freq=None,
**kwargs,
) -> NDFrameT:
"""
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
How to handle NAs **before** computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where NAs are filled with the last
valid observation, carried forward to the next valid one.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns', periods=-1)
2016 2015 2014
GOOG 0.179241 0.094112 NaN
APPL -0.252395 -0.011860 NaN
"""
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
_data = self.fillna(method=fill_method, axis=axis, limit=limit)
assert _data is not None # needed for mypy
data = _data
shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
# Unsupported left operand type for / ("NDFrameT")
rs = data / shifted - 1 # type: ignore[operator]
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs.__finalize__(self, method="pct_change")
def _logical_func(
self,
name: str,
func,
axis: Axis = 0,
bool_only: bool_t = False,
skipna: bool_t = True,
**kwargs,
) -> Series | bool_t:
nv.validate_logical_func((), kwargs, fname=name)
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if self.ndim > 1 and axis is None:
# Reduce along one dimension then the other, to simplify DataFrame._reduce
res = self._logical_func(
name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
)
return res._logical_func(name, func, skipna=skipna, **kwargs)
if (
self.ndim > 1
and axis == 1
and len(self._mgr.arrays) > 1
# TODO(EA2D): special-case not needed
and all(x.ndim == 2 for x in self._mgr.arrays)
and not kwargs
):
# Fastpath avoiding potentially expensive transpose
obj = self
if bool_only:
obj = self._get_bool_data()
return obj._reduce_axis1(name, func, skipna=skipna)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
def any(
self,
axis: Axis = 0,
bool_only: bool_t = False,
skipna: bool_t = True,
**kwargs,
) -> DataFrame | Series | bool_t:
return self._logical_func(
"any", nanops.nanany, axis, bool_only, skipna, **kwargs
)
def all(
self,
axis: Axis = 0,
bool_only: bool_t = False,
skipna: bool_t = True,
**kwargs,
) -> Series | bool_t:
return self._logical_func(
"all", nanops.nanall, axis, bool_only, skipna, **kwargs
)
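# Illustrative behaviour of the reductions defined above (not from the
# original source):
# >>> pd.Series([True, False, True]).any()
# True
# >>> pd.Series([True, False, True]).all()
# False
# >>> pd.DataFrame({"a": [True, True], "b": [True, False]}).all()
# a     True
# b    False
# dtype: bool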
def _accum_func(
self,
name: str,
func,
axis: Axis | None = None,
skipna: bool_t = True,
*args,
**kwargs,
):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return self.T._accum_func(
name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026
).T
def block_accum_func(blk_values):
values = blk_values.T if hasattr(blk_values, "T") else blk_values
result: np.ndarray | ExtensionArray
if isinstance(values, ExtensionArray):
result = values._accumulate(name, skipna=skipna, **kwargs)
else:
result = nanops.na_accum_func(values, func, skipna=skipna)
result = result.T if hasattr(result, "T") else result
return result
result = self._mgr.apply(block_accum_func)
return self._constructor(result).__finalize__(self, method=name)
def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs):
return self._accum_func(
"cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs
)
def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs):
return self._accum_func(
"cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs
)
def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs):
return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs)
def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs):
return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs)
def _stat_function_ddof(
self,
name: str,
func,
axis: Axis | None = None,
skipna: bool_t = True,
ddof: int = 1,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
nv.validate_stat_ddof_func((), kwargs, fname=name)
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if axis is None:
axis = self._stat_axis_number
return self._reduce(
func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
def sem(
self,
axis: Axis | None = None,
skipna: bool_t = True,
ddof: int = 1,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
return self._stat_function_ddof(
"sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs
)
def var(
self,
axis: Axis | None = None,
skipna: bool_t = True,
ddof: int = 1,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
return self._stat_function_ddof(
"var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs
)
def std(
self,
axis: Axis | None = None,
skipna: bool_t = True,
ddof: int = 1,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
return self._stat_function_ddof(
"std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs
)
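# Illustrative effect of ``ddof`` on the three reductions above (not from the
# original source): ``ddof=1`` (the default) normalises by N-1, ``ddof=0`` by N.
# >>> s = pd.Series([1.0, 2.0, 3.0])
# >>> s.std()            # ddof=1 -> sqrt(1.0)
# 1.0
# >>> s.std(ddof=0)      # doctest: +SKIP  (~0.8165, i.e. sqrt(2/3))
# >>> s.var(ddof=0)      # doctest: +SKIP  (~0.6667)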
def _stat_function(
self,
name: str,
func,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
if name == "median":
nv.validate_median((), kwargs)
else:
nv.validate_stat_func((), kwargs, fname=name)
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
return self._reduce(
func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
def min(
self,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return self._stat_function(
"min",
nanops.nanmin,
axis,
skipna,
numeric_only,
**kwargs,
)
def max(
self,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return self._stat_function(
"max",
nanops.nanmax,
axis,
skipna,
numeric_only,
**kwargs,
)
def mean(
self,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
return self._stat_function(
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs
)
def median(
self,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
return self._stat_function(
"median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs
)
def skew(
self,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
return self._stat_function(
"skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs
)
def kurt(
self,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
) -> Series | float:
return self._stat_function(
"kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs
)
kurtosis = kurt
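# Illustrative values for the shape statistics above (not from the original
# source): a symmetric sample has zero skew, and ``kurt`` uses Fisher's
# definition (kurtosis of a normal distribution == 0.0).
# >>> s = pd.Series([1, 2, 3, 4])
# >>> s.skew()
# 0.0
# >>> s.kurt()  # doctest: +SKIP  (approximately -1.2)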
def _min_count_stat_function(
self,
name: str,
func,
axis: Axis | None = None,
skipna: bool_t = True,
numeric_only: bool_t = False,
min_count: int = 0,
**kwargs,
):
if name == "sum":
nv.validate_sum((), kwargs)
elif name == "prod":
nv.validate_prod((), kwargs)
else:
nv.validate_stat_func((), kwargs, fname=name)
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if axis is None:
axis = self._stat_axis_number
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
def sum(
self,
axis: Axis | None = None,
skipna: bool_t = True,
numeric_only: bool_t = False,
min_count: int = 0,
**kwargs,
):
return self._min_count_stat_function(
"sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs
)
def prod(
self,
axis: Axis | None = None,
skipna: bool_t = True,
numeric_only: bool_t = False,
min_count: int = 0,
**kwargs,
):
return self._min_count_stat_function(
"prod",
nanops.nanprod,
axis,
skipna,
numeric_only,
min_count,
**kwargs,
)
product = prod
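# Illustrative effect of ``min_count`` (not from the original source): an
# empty or all-NA sum is 0.0 by default, but becomes NaN when fewer than
# ``min_count`` valid values are present.
# >>> pd.Series([], dtype="float64").sum()
# 0.0
# >>> pd.Series([], dtype="float64").sum(min_count=1)
# nan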
def _add_numeric_operations(cls) -> None:
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name1, name2 = _doc_params(cls)
@doc(
_bool_doc,
desc=_any_desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
def any(
self,
*,
axis: Axis = 0,
bool_only=None,
skipna: bool_t = True,
**kwargs,
):
return NDFrame.any(
self,
axis=axis,
bool_only=bool_only,
skipna=skipna,
**kwargs,
)
setattr(cls, "any", any)
@doc(
_bool_doc,
desc=_all_desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
def all(
self,
axis: Axis = 0,
bool_only=None,
skipna: bool_t = True,
**kwargs,
):
return NDFrame.all(self, axis, bool_only, skipna, **kwargs)
setattr(cls, "all", all)
@doc(
_num_ddof_doc,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
notes="",
examples="",
)
def sem(
self,
axis: Axis | None = None,
skipna: bool_t = True,
ddof: int = 1,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs)
setattr(cls, "sem", sem)
@doc(
_num_ddof_doc,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
notes="",
examples=_var_examples,
)
def var(
self,
axis: Axis | None = None,
skipna: bool_t = True,
ddof: int = 1,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs)
setattr(cls, "var", var)
@doc(
_num_ddof_doc,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
notes=_std_notes,
examples=_std_examples,
)
def std(
self,
axis: Axis | None = None,
skipna: bool_t = True,
ddof: int = 1,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs)
setattr(cls, "std", std)
@doc(
_cnum_doc,
desc="minimum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="min",
examples=_cummin_examples,
)
def cummin(
self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs
):
return NDFrame.cummin(self, axis, skipna, *args, **kwargs)
setattr(cls, "cummin", cummin)
@doc(
_cnum_doc,
desc="maximum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="max",
examples=_cummax_examples,
)
def cummax(
self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs
):
return NDFrame.cummax(self, axis, skipna, *args, **kwargs)
setattr(cls, "cummax", cummax)
@doc(
_cnum_doc,
desc="sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="sum",
examples=_cumsum_examples,
)
def cumsum(
self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs
):
return NDFrame.cumsum(self, axis, skipna, *args, **kwargs)
setattr(cls, "cumsum", cumsum)
@doc(
_cnum_doc,
desc="product",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="prod",
examples=_cumprod_examples,
)
def cumprod(
self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs
):
return NDFrame.cumprod(self, axis, skipna, *args, **kwargs)
setattr(cls, "cumprod", cumprod)
# error: Untyped decorator makes function "sum" untyped
@doc(
_num_doc,
desc="Return the sum of the values over the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
def sum(
self,
axis: Axis | None = None,
skipna: bool_t = True,
numeric_only: bool_t = False,
min_count: int = 0,
**kwargs,
):
return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs)
setattr(cls, "sum", sum)
@doc(
_num_doc,
desc="Return the product of the values over the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=_stat_func_see_also,
examples=_prod_examples,
)
def prod(
self,
axis: Axis | None = None,
skipna: bool_t = True,
numeric_only: bool_t = False,
min_count: int = 0,
**kwargs,
):
return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs)
setattr(cls, "prod", prod)
cls.product = prod
@doc(
_num_doc,
desc="Return the mean of the values over the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
def mean(
self,
axis: AxisInt | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs)
setattr(cls, "mean", mean)
@doc(
_num_doc,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
def skew(
self,
axis: AxisInt | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs)
setattr(cls, "skew", skew)
@doc(
_num_doc,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
def kurt(
self,
axis: Axis | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs)
setattr(cls, "kurt", kurt)
cls.kurtosis = kurt
@doc(
_num_doc,
desc="Return the median of the values over the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
def median(
self,
axis: AxisInt | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.median(self, axis, skipna, numeric_only, **kwargs)
setattr(cls, "median", median)
@doc(
_num_doc,
desc="Return the maximum of the values over the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is "
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=_stat_func_see_also,
examples=_max_examples,
)
def max(
self,
axis: AxisInt | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.max(self, axis, skipna, numeric_only, **kwargs)
setattr(cls, "max", max)
@doc(
_num_doc,
desc="Return the minimum of the values over the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is "
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=_stat_func_see_also,
examples=_min_examples,
)
def min(
self,
axis: AxisInt | None = 0,
skipna: bool_t = True,
numeric_only: bool_t = False,
**kwargs,
):
return NDFrame.min(self, axis, skipna, numeric_only, **kwargs)
setattr(cls, "min", min)
def rolling(
self,
window: int | dt.timedelta | str | BaseOffset | BaseIndexer,
min_periods: int | None = None,
center: bool_t = False,
win_type: str | None = None,
on: str | None = None,
axis: Axis = 0,
closed: str | None = None,
step: int | None = None,
method: str = "single",
) -> Window | Rolling:
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
step=step,
method=method,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
step=step,
method=method,
)
def expanding(
self,
min_periods: int = 1,
axis: Axis = 0,
method: str = "single",
) -> Expanding:
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, axis=axis, method=method)
def ewm(
self,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool_t = True,
ignore_na: bool_t = False,
axis: Axis = 0,
times: np.ndarray | DataFrame | Series | None = None,
method: str = "single",
) -> ExponentialMovingWindow:
axis = self._get_axis_number(axis)
return ExponentialMovingWindow(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
times=times,
method=method,
)
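# Illustrative use of the window constructors above (not from the original
# source): ``rolling`` needs ``window`` observations before emitting a value,
# while ``expanding`` grows from the start of the data.
# >>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
# >>> s.rolling(2).sum()
# 0    NaN
# 1    3.0
# 2    5.0
# 3    7.0
# dtype: float64
# >>> s.expanding().sum()
# 0     1.0
# 1     3.0
# 2     6.0
# 3    10.0
# dtype: float64
# >>> s.ewm(com=0.5).mean()  # doctest: +SKIP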
# ----------------------------------------------------------------------
# Arithmetic Methods
def _inplace_method(self, other, op):
"""
Wrap arithmetic method to operate inplace.
"""
result = op(self, other)
if (
self.ndim == 1
and result._indexed_same(self)
and is_dtype_equal(result.dtype, self.dtype)
):
# GH#36498 this inplace op can _actually_ be inplace.
# Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager,
# BlockManager, SingleBlockManager]" has no attribute "setitem_inplace"
self._mgr.setitem_inplace( # type: ignore[union-attr]
slice(None), result._values
)
return self
# Delete cacher
self._reset_cacher()
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(
result.reindex_like(self, copy=False), verify_is_copy=False
)
return self
def __iadd__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for + ("Type[NDFrame]")
return self._inplace_method(other, type(self).__add__) # type: ignore[operator]
def __isub__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for - ("Type[NDFrame]")
return self._inplace_method(other, type(self).__sub__) # type: ignore[operator]
def __imul__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for * ("Type[NDFrame]")
return self._inplace_method(other, type(self).__mul__) # type: ignore[operator]
def __itruediv__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for / ("Type[NDFrame]")
return self._inplace_method(
other, type(self).__truediv__ # type: ignore[operator]
)
def __ifloordiv__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for // ("Type[NDFrame]")
return self._inplace_method(
other, type(self).__floordiv__ # type: ignore[operator]
)
def __imod__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for % ("Type[NDFrame]")
return self._inplace_method(other, type(self).__mod__) # type: ignore[operator]
def __ipow__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for ** ("Type[NDFrame]")
return self._inplace_method(other, type(self).__pow__) # type: ignore[operator]
def __iand__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for & ("Type[NDFrame]")
return self._inplace_method(other, type(self).__and__) # type: ignore[operator]
def __ior__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for | ("Type[NDFrame]")
return self._inplace_method(other, type(self).__or__) # type: ignore[operator]
def __ixor__(self: NDFrameT, other) -> NDFrameT:
# error: Unsupported left operand type for ^ ("Type[NDFrame]")
return self._inplace_method(other, type(self).__xor__) # type: ignore[operator]
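# Illustrative behaviour of the in-place operators above (not from the
# original source): when the result keeps the same index and dtype, the
# assignment happens truly in place via ``_mgr.setitem_inplace``; otherwise
# the object is realigned and replaced.
# >>> s = pd.Series([1, 2, 3])
# >>> s += 1
# >>> s
# 0    2
# 1    3
# 2    4
# dtype: int64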
# ----------------------------------------------------------------------
# Misc methods
def _find_valid_index(self, *, how: str) -> Hashable | None:
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values))
if idxpos is None:
return None
return self.index[idxpos]
def first_valid_index(self) -> Hashable | None:
"""
Return index for {position} non-NA value or None, if no non-NA value is found.
Returns
-------
type of index
Notes
-----
If all elements are NA/null, returns None.
Also returns None for empty {klass}.
"""
return self._find_valid_index(how="first")
def last_valid_index(self) -> Hashable | None:
return self._find_valid_index(how="last")
class BinOp(Op):
"""
Hold a binary operator and its operands.
Parameters
----------
op : str
lhs : Term or Op
rhs : Term or Op
"""
def __init__(self, op: str, lhs, rhs) -> None:
super().__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError as err:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError(
f"Invalid binary operator {repr(op)}, valid operators are {keys}"
) from err
def __call__(self, env):
"""
Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
"""
Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == "python":
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
right = self.rhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
from pandas.core.computation.eval import eval
res = eval(self, local_dict=env, engine=engine, parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self) -> None:
"""
Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
encoder: Callable
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
rhs = self.rhs
lhs = self.lhs
# GH#24883 unwrap dtype if necessary to ensure we have a type object
rhs_rt = rhs.return_type
rhs_rt = getattr(rhs_rt, "type", rhs_rt)
lhs_rt = lhs.return_type
lhs_rt = getattr(lhs_rt, "type", lhs_rt)
if (
(lhs.is_scalar or rhs.is_scalar)
and self.op in _bool_ops_dict
and (
not (
issubclass(rhs_rt, (bool, np.bool_))
and issubclass(lhs_rt, (bool, np.bool_))
)
)
):
raise NotImplementedError("cannot evaluate scalar only bool ops")
The provided code snippet includes necessary dependencies for implementing the `eval` function. Write a Python function `def eval( expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users parser: str = "pandas", engine: str | None = None, local_dict=None, global_dict=None, resolvers=(), level: int = 0, target=None, inplace: bool = False, )` to solve the following problem:
Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : {'pandas', 'python'}, default 'pandas' The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : {'python', 'numexpr'}, default 'numexpr' The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'`` : This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'`` : Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series, or None The completion value of evaluating the given code or None if ``inplace=True``. Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. 
- `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. Examples -------- >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]}) >>> df animal age 0 dog 10 1 pig 20 We can add a new column using ``pd.eval``: >>> pd.eval("double_age = df.age * 2", target=df) animal age double_age 0 dog 10 20 1 pig 20 40
Here is the function:
def eval(
expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users
parser: str = "pandas",
engine: str | None = None,
local_dict=None,
global_dict=None,
resolvers=(),
level: int = 0,
target=None,
inplace: bool = False,
):
"""
Evaluate a Python expression as a string using various backends.
The following arithmetic operations are supported: ``+``, ``-``, ``*``,
``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
:keyword:`or`, and :keyword:`not` with the same semantics as the
corresponding bitwise operators. :class:`~pandas.Series` and
:class:`~pandas.DataFrame` objects are supported and behave as they would
with plain ol' Python evaluation.
Parameters
----------
expr : str
The expression to evaluate. This string cannot contain any Python
`statements
<https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.
parser : {'pandas', 'python'}, default 'pandas'
The parser to use to construct the syntax tree from the expression. The
default of ``'pandas'`` parses code slightly different than standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
engine : {'python', 'numexpr'}, default 'numexpr'
The engine used to evaluate the expression. Supported engines are
- None : tries to use ``numexpr``, falls back to ``python``
- ``'numexpr'`` : This default engine evaluates pandas objects using
numexpr for large speed ups in complex expressions with large frames.
- ``'python'`` : Performs operations as if you had ``eval``'d in top
level python. This engine is generally not that useful.
More backends may be available in the future.
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
global_dict : dict or None, optional
A dictionary of global variables, taken from globals() by default.
resolvers : list of dict-like or None, optional
A list of objects implementing the ``__getitem__`` special method that
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~DataFrame.query` method to inject the
``DataFrame.index`` and ``DataFrame.columns``
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
The number of prior stack frames to traverse and add to the current
scope. Most users will **not** need to change this parameter.
target : object, optional, default None
This is the target object for assignment. It is used when there is
variable assignment in the expression. If so, then `target` must
support item assignment with string keys, and if a copy is being
returned, it must also support `.copy()`.
inplace : bool, default False
If `target` is provided, and the expression mutates `target`, whether
to modify `target` inplace. Otherwise, return a copy of `target` with
the mutation.
Returns
-------
ndarray, numeric scalar, DataFrame, Series, or None
The completion value of evaluating the given code or None if ``inplace=True``.
Raises
------
ValueError
There are many instances where such an error can be raised:
- `target=None`, but the expression is multiline.
- The expression is multiline, but not all of them have item assignment.
An example of such an arrangement is this:
a = b + 1
a + 2
Here, there are expressions on different lines, making it multiline,
but the last line has no variable assigned to the output of `a + 2`.
- `inplace=True`, but the expression is missing item assignment.
- Item assignment is provided, but the `target` does not support
string item assignment.
- Item assignment is provided and `inplace=False`, but the `target`
does not support the `.copy()` method
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
recursively cast to ``float64``.
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
Examples
--------
>>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})
>>> df
animal age
0 dog 10
1 pig 20
We can add a new column using ``pd.eval``:
>>> pd.eval("double_age = df.age * 2", target=df)
animal age double_age
0 dog 10 20
1 pig 20 40
"""
inplace = validate_bool_kwarg(inplace, "inplace")
exprs: list[str | BinOp]
if isinstance(expr, str):
_check_expression(expr)
exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""]
else:
# ops.BinOp; for internal compat, not intended to be passed by users
exprs = [expr]
multi_line = len(exprs) > 1
if multi_line and target is None:
raise ValueError(
"multi-line expressions are only valid in the "
"context of data, use DataFrame.eval"
)
engine = _check_engine(engine)
_check_parser(parser)
_check_resolvers(resolvers)
ret = None
first_expr = True
target_modified = False
for expr in exprs:
expr = _convert_expression(expr)
_check_for_locals(expr, level, parser)
# get our (possibly passed-in) scope
env = ensure_scope(
level + 1,
global_dict=global_dict,
local_dict=local_dict,
resolvers=resolvers,
target=target,
)
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env)
if engine == "numexpr" and (
is_extension_array_dtype(parsed_expr.terms.return_type)
or getattr(parsed_expr.terms, "operand_types", None) is not None
and any(
is_extension_array_dtype(elem)
for elem in parsed_expr.terms.operand_types
)
):
warnings.warn(
"Engine has switched to 'python' because numexpr does not support "
"extension array dtypes. Please set your engine to python manually.",
RuntimeWarning,
stacklevel=find_stack_level(),
)
engine = "python"
# construct the engine and evaluate the parsed expression
eng = ENGINES[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()
if parsed_expr.assigner is None:
if multi_line:
raise ValueError(
"Multi-line expressions are only valid "
"if all expressions contain an assignment"
)
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
# assign if needed
assigner = parsed_expr.assigner
if env.target is not None and assigner is not None:
target_modified = True
# if returning a copy, copy only on the first assignment
if not inplace and first_expr:
try:
target = env.target.copy()
except AttributeError as err:
raise ValueError("Cannot return a copy of the target") from err
else:
target = env.target
# TypeError is most commonly raised (e.g. int, list), but you
# get IndexError if you try to do this assignment on np.ndarray.
# we will ignore numpy warnings here; e.g. if trying
# to use a non-numeric indexer
try:
with warnings.catch_warnings(record=True):
# TODO: Filter the warnings we actually care about here.
if inplace and isinstance(target, NDFrame):
target.loc[:, assigner] = ret
else:
target[assigner] = ret
except (TypeError, IndexError) as err:
raise ValueError("Cannot assign expression output to target") from err
if not resolvers:
resolvers = ({assigner: ret},)
else:
# existing resolver needs updated to handle
# case of mutating existing column in copy
for resolver in resolvers:
if assigner in resolver:
resolver[assigner] = ret
break
else:
resolvers += ({assigner: ret},)
ret = None
first_expr = False
# We want to exclude `inplace=None` as being False.
if inplace is False:
return target if target_modified else ret | Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : {'pandas', 'python'}, default 'pandas' The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : {'python', 'numexpr'}, default 'numexpr' The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'`` : This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'`` : Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series, or None The completion value of evaluating the given code or None if ``inplace=True``. Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. 
An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. Examples -------- >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]}) >>> df animal age 0 dog 10 1 pig 20 We can add a new column using ``pd.eval``: >>> pd.eval("double_age = df.age * 2", target=df) animal age double_age 0 dog 10 20 1 pig 20 40 |
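A minimal, hedged sketch of the multi-line branch described above (not part of the original record): every line must contain an assignment, ``target`` receives the new columns, and the resolver update lets a later line reference a name created by an earlier one.
>>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})
>>> pd.eval("double_age = df.age * 2\ntriple_age = double_age * 3", target=df)  # doctest: +SKIP
The returned frame is expected to carry both ``double_age`` and ``triple_age`` columns; with ``inplace=True`` the mutation would happen on ``df`` itself and ``None`` would be returned.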
173,267 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Callable,
Sequence,
)
import warnings
import numpy as np
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import result_type_many
def _align_core_single_unary_op(
term,
) -> tuple[partial | type[NDFrame], dict[str, Index] | None]:
typ: partial | type[NDFrame]
axes: dict[str, Index] | None = None
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
if hasattr(term.value, "axes"):
axes = _zip_axes_from_type(typ, term.value.axes)
return typ, axes
def _any_pandas_objects(terms) -> bool:
"""
Check a sequence of terms for instances of PandasObject.
"""
return any(isinstance(term.value, PandasObject) for term in terms)
def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ...
class Callable(BaseTypingInstance):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
def py__get__(self, instance, class_value):
return ValueSet([self])
def result_type_many(*arrays_and_dtypes):
"""
Wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32)
argument limit.
"""
try:
return np.result_type(*arrays_and_dtypes)
except ValueError:
# we have > NPY_MAXARGS terms in our expression
return reduce(np.result_type, arrays_and_dtypes)
except TypeError:
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import is_extension_array_dtype
arr_and_dtypes = list(arrays_and_dtypes)
ea_dtypes, non_ea_dtypes = [], []
for arr_or_dtype in arr_and_dtypes:
if is_extension_array_dtype(arr_or_dtype):
ea_dtypes.append(arr_or_dtype)
else:
non_ea_dtypes.append(arr_or_dtype)
if non_ea_dtypes:
try:
np_dtype = np.result_type(*non_ea_dtypes)
except ValueError:
np_dtype = reduce(np.result_type, arrays_and_dtypes)
return find_common_type(ea_dtypes + [np_dtype])
return find_common_type(ea_dtypes)
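# Illustrative usage (not from the original source): with more than 32
# arguments ``np.result_type`` raises ValueError (NPY_MAXARGS), and the
# reduce() fallback takes over.
# >>> result_type_many(*([np.dtype("int64")] * 40), np.dtype("float32"))
# dtype('float64')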
F = TypeVar("F", bound=FuncType)
def _filter_special_cases(f) -> Callable[[F], F]:
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return result_type_many(*term_values), None
return f(terms)
return wrapper | null |
173,268 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Callable,
Sequence,
)
import warnings
import numpy as np
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import result_type_many
def _zip_axes_from_type(
typ: type[NDFrame], new_axes: Sequence[Index]
) -> dict[str, Index]:
return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}
def _align_core(terms):
term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")]
term_dims = [terms[i].value.ndim for i in term_index]
from pandas import Series
ndims = Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, ABCSeries)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how="outer")
for i, ndim in ndims.items():
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, "reindex"):
transpose = isinstance(ti, ABCSeries) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = (
f"Alignment difference on axis {axis} is larger "
f"than an order of magnitude on term {repr(terms[i].name)}, "
f"by more than {ordm:.4g}; performance may suffer."
)
warnings.warn(
w, category=PerformanceWarning, stacklevel=find_stack_level()
)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
ABCSeries = cast(
"Type[Series]",
create_pandas_abc_type("ABCSeries", "_typ", ("series",)),
)
ABCDataFrame = cast(
"Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
)
def result_type_many(*arrays_and_dtypes):
"""
Wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32)
argument limit.
"""
try:
return np.result_type(*arrays_and_dtypes)
except ValueError:
# we have > NPY_MAXARGS terms in our expression
return reduce(np.result_type, arrays_and_dtypes)
except TypeError:
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import is_extension_array_dtype
arr_and_dtypes = list(arrays_and_dtypes)
ea_dtypes, non_ea_dtypes = [], []
for arr_or_dtype in arr_and_dtypes:
if is_extension_array_dtype(arr_or_dtype):
ea_dtypes.append(arr_or_dtype)
else:
non_ea_dtypes.append(arr_or_dtype)
if non_ea_dtypes:
try:
np_dtype = np.result_type(*non_ea_dtypes)
except ValueError:
np_dtype = reduce(np.result_type, arrays_and_dtypes)
return find_common_type(ea_dtypes + [np_dtype])
return find_common_type(ea_dtypes)
The provided code snippet includes necessary dependencies for implementing the `align_terms` function. Write a Python function `def align_terms(terms)` to solve the following problem:
Align a set of terms.
Here is the function:
def align_terms(terms):
"""
Align a set of terms.
"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, (ABCSeries, ABCDataFrame)):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes | Align a set of terms. |
173,269 | from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Callable,
Sequence,
)
import warnings
import numpy as np
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import result_type_many
class partial(Generic[_T]):
func: Callable[..., _T]
args: Tuple[Any, ...]
keywords: Dict[str, Any]
def __init__(self, func: Callable[..., _T], *args: Any, **kwargs: Any) -> None: ...
def __call__(self, *args: Any, **kwargs: Any) -> _T: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
class PandasObject(DirNamesMixin):
"""
Baseclass for various pandas objects.
"""
# results from calls to methods decorated with cache_readonly get added to _cache
_cache: dict[str, Any]
def _constructor(self):
"""
Class constructor (for this class it's just `__class__`).
"""
return type(self)
def __repr__(self) -> str:
"""
Return a string representation for a particular object.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key: str | None = None) -> None:
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if not hasattr(self, "_cache"):
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self) -> int:
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
memory_usage = getattr(self, "memory_usage", None)
if memory_usage:
mem = memory_usage(deep=True) # pylint: disable=not-callable
return int(mem if is_scalar(mem) else mem.sum())
# no memory_usage attribute, so fall back to object's 'sizeof'
return super().__sizeof__()
The provided code snippet includes necessary dependencies for implementing the `reconstruct_object` function. Write a Python function `def reconstruct_object(typ, obj, axes, dtype)` to solve the following problem:
Reconstruct an object given its type, raw value, and possibly empty (None) axes. Parameters ---------- typ : object A type obj : object The value to use in the type constructor axes : dict The axes to use to construct the resulting pandas object Returns ------- ret : typ An object of type ``typ`` with the value `obj` and possible axes `axes`.
Here is the function:
def reconstruct_object(typ, obj, axes, dtype):
"""
Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if not isinstance(typ, partial) and issubclass(typ, PandasObject):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if (
len(obj.shape) == 1
and len(obj) == 1
and not isinstance(ret_value, np.ndarray)
):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value | Reconstruct an object given its type, raw value, and possibly empty (None) axes. Parameters ---------- typ : object A type obj : object The value to use in the type constructor axes : dict The axes to use to construct the resulting pandas object Returns ------- ret : typ An object of type ``typ`` with the value `obj` and possible axes `axes`. |
173,270 | from __future__ import annotations
from typing import (
Callable,
final,
)
import warnings
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
class Callable(BaseTypingInstance):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
def py__get__(self, instance, class_value):
return ValueSet([self])
The provided code snippet includes necessary dependencies for implementing the `delegate_names` function. Write a Python function `def delegate_names( delegate, accessors: list[str], typ: str, overwrite: bool = False, accessor_mapping: Callable[[str], str] = lambda x: x, raise_on_missing: bool = True, )` to solve the following problem:
Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling `_add_delegate_accessors` below a class definition. Parameters ---------- delegate : object The class to get methods/properties & doc-strings. accessors : Sequence[str] List of accessor to add. typ : {'property', 'method'} overwrite : bool, default False Overwrite the method/property in the target class if it exists. accessor_mapping: Callable, default lambda x: x Callable to map the delegate's function to the cls' function. raise_on_missing: bool, default True Raise if an accessor does not exist on delegate. False skips the missing accessor. Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, ["categories", "ordered"], "property") class CategoricalAccessor(PandasDelegate): [...]
Here is the function:
def delegate_names(
delegate,
accessors: list[str],
typ: str,
overwrite: bool = False,
accessor_mapping: Callable[[str], str] = lambda x: x,
raise_on_missing: bool = True,
):
"""
Add delegated names to a class using a class decorator. This provides
an alternative usage to directly calling `_add_delegate_accessors`
below a class definition.
Parameters
----------
delegate : object
The class to get methods/properties & doc-strings.
accessors : Sequence[str]
List of accessor to add.
typ : {'property', 'method'}
overwrite : bool, default False
Overwrite the method/property in the target class if it exists.
accessor_mapping: Callable, default lambda x: x
Callable to map the delegate's function to the cls' function.
raise_on_missing: bool, default True
Raise if an accessor does not exist on delegate.
False skips the missing accessor.
Returns
-------
callable
A class decorator.
Examples
--------
@delegate_names(Categorical, ["categories", "ordered"], "property")
class CategoricalAccessor(PandasDelegate):
[...]
"""
def add_delegate_accessors(cls):
cls._add_delegate_accessors(
delegate,
accessors,
typ,
overwrite=overwrite,
accessor_mapping=accessor_mapping,
raise_on_missing=raise_on_missing,
)
return cls
return add_delegate_accessors | Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling `_add_delegate_accessors` below a class definition. Parameters ---------- delegate : object The class to get methods/properties & doc-strings. accessors : Sequence[str] List of accessor to add. typ : {'property', 'method'} overwrite : bool, default False Overwrite the method/property in the target class if it exists. accessor_mapping: Callable, default lambda x: x Callable to map the delegate's function to the cls' function. raise_on_missing: bool, default True Raise if an accessor does not exist on delegate. False skips the missing accessor. Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, ["categories", "ordered"], "property") class CategoricalAccessor(PandasDelegate): [...] |
173,271 | from __future__ import annotations
from typing import (
Callable,
final,
)
import warnings
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
def _register_accessor(name, cls):
def register_dataframe_accessor(name):
from pandas import DataFrame
return _register_accessor(name, DataFrame) | null |
173,272 | from __future__ import annotations
from typing import (
Callable,
final,
)
import warnings
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
def _register_accessor(name, cls):
"""
Register a custom accessor on {klass} objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
When accessed, your accessor will be initialized with the pandas object
the user is interacting with. So the signature must be
.. code-block:: python
def __init__(self, pandas_object): # noqa: E999
...
For consistency with pandas methods, you should raise an ``AttributeError``
if the data passed to your accessor has an incorrect dtype.
>>> pd.Series(['a', 'b']).dt
Traceback (most recent call last):
...
AttributeError: Can only use .dt accessor with datetimelike values
Examples
--------
In your library code::
import pandas as pd
@pd.api.extensions.register_dataframe_accessor("geo")
class GeoAccessor:
def __init__(self, pandas_obj):
self._obj = pandas_obj
@property
def center(self):
# return the geographic center point of this DataFrame
lat = self._obj.latitude
lon = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def plot(self):
# plot this array's data on a map, e.g., using Cartopy
pass
Back in an interactive IPython session:
.. code-block:: ipython
In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10),
...: "latitude": np.linspace(0, 20)}})
In [2]: ds.geo.center
Out[2]: (5.0, 10.0)
In [3]: ds.geo.plot() # plots data on a map
"""
def decorator(accessor):
if hasattr(cls, name):
warnings.warn(
f"registration of accessor {repr(accessor)} under name "
f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
f"attribute with the same name.",
UserWarning,
stacklevel=find_stack_level(),
)
setattr(cls, name, CachedAccessor(name, accessor))
cls._accessors.add(name)
return accessor
return decorator
def register_series_accessor(name):
from pandas import Series
return _register_accessor(name, Series) | null |
173,273 | from __future__ import annotations
from typing import (
Callable,
final,
)
import warnings
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
def _register_accessor(name, cls):
def register_index_accessor(name):
from pandas import Index
return _register_accessor(name, Index) | null |
173,274 | from __future__ import annotations
import numpy as np
from pandas._libs.lib import i8max
from pandas._libs.tslibs import (
BaseOffset,
OutOfBoundsDatetime,
Timedelta,
Timestamp,
iNaT,
)
from pandas._typing import npt
def _generate_range_overflow_safe(
endpoint: int, periods: int, stride: int, side: str = "start"
) -> int:
"""
Calculate the second endpoint for passing to np.arange, checking
to avoid an integer overflow. Catch OverflowError and re-raise
as OutOfBoundsDatetime.
Parameters
----------
endpoint : int
nanosecond timestamp of the known endpoint of the desired range
periods : int
number of periods in the desired range
stride : int
nanoseconds between periods in the desired range
side : {'start', 'end'}
which end of the range `endpoint` refers to
Returns
-------
other_end : int
Raises
------
OutOfBoundsDatetime
"""
# GH#14187 raise instead of incorrectly wrapping around
assert side in ["start", "end"]
i64max = np.uint64(i8max)
msg = f"Cannot generate range with {side}={endpoint} and periods={periods}"
with np.errstate(over="raise"):
# if periods * strides cannot be multiplied within the *uint64* bounds,
# we cannot salvage the operation by recursing, so raise
try:
addend = np.uint64(periods) * np.uint64(np.abs(stride))
except FloatingPointError as err:
raise OutOfBoundsDatetime(msg) from err
if np.abs(addend) <= i64max:
# relatively easy case without casting concerns
return _generate_range_overflow_safe_signed(endpoint, periods, stride, side)
elif (endpoint > 0 and side == "start" and stride > 0) or (
endpoint < 0 < stride and side == "end"
):
# no chance of not-overflowing
raise OutOfBoundsDatetime(msg)
elif side == "end" and endpoint - stride <= i64max < endpoint:
# in _generate_regular_range we added `stride` thereby overflowing
# the bounds. Adjust to fix this.
return _generate_range_overflow_safe(
endpoint - stride, periods - 1, stride, side
)
# split into smaller pieces
mid_periods = periods // 2
remaining = periods - mid_periods
assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
midpoint = _generate_range_overflow_safe(endpoint, mid_periods, stride, side)
return _generate_range_overflow_safe(midpoint, remaining, stride, side)
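A small illustration of the overflow case this helper guards against, with assumed (not pandas-sourced) values; it reuses the same np.errstate-based detection the code above relies on.
import numpy as np

endpoint, periods, stride = 2**62, 2**30, 2**43   # each fits in int64 on its own

naive = endpoint + periods * stride        # exact Python-int arithmetic
print(naive > np.iinfo(np.int64).max)      # True: the naive endpoint would wrap in int64

with np.errstate(over="raise"):
    try:
        np.uint64(periods) * np.uint64(stride)   # uint64 scalar overflow is detected
    except FloatingPointError:
        print("overflow detected; the helper re-raises this as OutOfBoundsDatetime")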
The provided code snippet includes necessary dependencies for implementing the `generate_regular_range` function. Write a Python function `def generate_regular_range( start: Timestamp | Timedelta | None, end: Timestamp | Timedelta | None, periods: int | None, freq: BaseOffset, unit: str = "ns", ) -> npt.NDArray[np.intp]` to solve the following problem:
Generate a range of dates or timestamps with the spans between dates described by the given `freq` DateOffset. Parameters ---------- start : Timedelta, Timestamp or None First point of produced date range. end : Timedelta, Timestamp or None Last point of produced date range. periods : int or None Number of periods in produced date range. freq : Tick Describes space between dates in produced date range. unit : str, default "ns" The resolution the output is meant to represent. Returns ------- ndarray[np.int64] Representing the given resolution.
Here is the function:
def generate_regular_range(
start: Timestamp | Timedelta | None,
end: Timestamp | Timedelta | None,
periods: int | None,
freq: BaseOffset,
unit: str = "ns",
) -> npt.NDArray[np.intp]:
"""
Generate a range of dates or timestamps with the spans between dates
described by the given `freq` DateOffset.
Parameters
----------
start : Timedelta, Timestamp or None
First point of produced date range.
end : Timedelta, Timestamp or None
Last point of produced date range.
periods : int or None
Number of periods in produced date range.
freq : Tick
Describes space between dates in produced date range.
unit : str, default "ns"
The resolution the output is meant to represent.
Returns
-------
ndarray[np.int64]
Representing the given resolution.
"""
istart = start._value if start is not None else None
iend = end._value if end is not None else None
freq.nanos # raises if non-fixed frequency
td = Timedelta(freq)
try:
td = td.as_unit( # pyright: ignore[reportGeneralTypeIssues]
unit, round_ok=False
)
except ValueError as err:
raise ValueError(
f"freq={freq} is incompatible with unit={unit}. "
"Use a lower freq or a higher unit instead."
) from err
stride = int(td._value)
if periods is None and istart is not None and iend is not None:
b = istart
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = b + (iend - b) // stride * stride + stride // 2 + 1
elif istart is not None and periods is not None:
b = istart
e = _generate_range_overflow_safe(b, periods, stride, side="start")
elif iend is not None and periods is not None:
e = iend + stride
b = _generate_range_overflow_safe(e, periods, stride, side="end")
else:
raise ValueError(
"at least 'start' or 'end' should be specified if a 'period' is given."
)
with np.errstate(over="raise"):
# If the range is sufficiently large, np.arange may overflow
# and incorrectly return an empty array if not caught.
try:
values = np.arange(b, e, stride, dtype=np.int64)
except FloatingPointError:
xdr = [b]
while xdr[-1] != e:
xdr.append(xdr[-1] + stride)
values = np.array(xdr[:-1], dtype=np.int64)
return values | Generate a range of dates or timestamps with the spans between dates described by the given `freq` DateOffset. Parameters ---------- start : Timedelta, Timestamp or None First point of produced date range. end : Timedelta, Timestamp or None Last point of produced date range. periods : int or None Number of periods in produced date range. freq : Tick Describes space between dates in produced date range. unit : str, default "ns" The resolution the output is meant to represent. Returns ------- ndarray[np.int64] Representing the given resolution. |
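A hedged usage sketch of generate_regular_range, assuming pandas is installed and the private helpers it calls (e.g. _generate_range_overflow_safe_signed, not shown in this snippet) are available alongside the code above; the printed values follow from the hourly stride.
from pandas import Timestamp
from pandas.tseries.offsets import Hour

start = Timestamp("2023-01-01")
vals = generate_regular_range(start, None, periods=4, freq=Hour(), unit="ns")

print(vals.dtype, len(vals))   # int64 4
print(vals[1] - vals[0])       # 3600000000000 (one hour in nanoseconds)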
173,275 | from __future__ import annotations
from csv import QUOTE_NONNUMERIC
from functools import partial
import operator
from shutil import get_terminal_size
from typing import (
TYPE_CHECKING,
Hashable,
Iterator,
Literal,
Sequence,
TypeVar,
cast,
overload,
)
import numpy as np
from pandas._config import get_option
from pandas._libs import (
NaT,
algos as libalgos,
lib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._typing import (
ArrayLike,
AstypeArg,
AxisInt,
Dtype,
NpDtype,
Ordered,
Shape,
SortKind,
npt,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
find_common_type,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_any_real_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_hashable,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
ExtensionDtype,
)
from pandas.core.dtypes.generic import (
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
)
from pandas.core import (
algorithms,
arraylike,
ops,
)
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.algorithms import (
factorize,
take_nd,
)
from pandas.core.arrays._mixins import (
NDArrayBackedExtensionArray,
ravel_compat,
)
from pandas.core.base import (
ExtensionArray,
NoNewAttributesMixin,
PandasObject,
)
import pandas.core.common as com
from pandas.core.construction import (
extract_array,
sanitize_array,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.core.strings.object_array import ObjectStringArrayMixin
from pandas.io.formats import console
class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMixin):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`__
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
['a', 'b', 'c', 'a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Missing values are not included as a category.
>>> c = pd.Categorical([1, 2, 3, 1, 2, 3, np.nan])
>>> c
[1, 2, 3, 1, 2, 3, NaN]
Categories (3, int64): [1, 2, 3]
However, their presence is indicated in the `codes` attribute
by code `-1`.
>>> c.codes
array([ 0, 1, 2, 0, 1, 2, -1], dtype=int8)
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
['a', 'b', 'c', 'a', 'b', 'c']
Categories (3, object): ['c' < 'b' < 'a']
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
# tolist is not actually deprecated, just suppressed in the __dir__
_hidden_attrs = PandasObject._hidden_attrs | frozenset(["tolist"])
_typ = "categorical"
_dtype: CategoricalDtype
def __init__(
self,
values,
categories=None,
ordered=None,
dtype: Dtype | None = None,
fastpath: bool = False,
copy: bool = True,
) -> None:
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
codes = coerce_indexer_dtype(values, dtype.categories)
dtype = CategoricalDtype(ordered=False).update_dtype(dtype)
super().__init__(codes, dtype)
return
if not is_list_like(values):
# GH#38433
raise TypeError("Categorical input must be list-like")
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)):
values = com.convert_to_list_like(values)
if isinstance(values, list) and len(values) == 0:
# By convention, empty lists result in object dtype:
values = np.array([], dtype=object)
elif isinstance(values, np.ndarray):
if values.ndim > 1:
# preempt sanitize_array from raising ValueError
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
)
values = sanitize_array(values, None)
else:
# i.e. must be a list
arr = sanitize_array(values, None)
null_mask = isna(arr)
if null_mask.any():
# We remove null values here, then below will re-insert
# them, grep "full_codes"
arr_list = [values[idx] for idx in np.where(~null_mask)[0]]
# GH#44900 Do not cast to float if we have only missing values
if arr_list or arr.dtype == "object":
sanitize_dtype = None
else:
sanitize_dtype = arr.dtype
arr = sanitize_array(arr_list, None, dtype=sanitize_dtype)
values = arr
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = extract_array(values)._codes
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories, copy=copy
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
dtype = CategoricalDtype(ordered=False).update_dtype(dtype)
arr = coerce_indexer_dtype(codes, dtype.categories)
super().__init__(arr, dtype)
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _internal_fill_value(self) -> int:
# using the specific numpy integer instead of python int to get
# the correct dtype back from _quantile in the all-NA case
dtype = self._ndarray.dtype
return dtype.type(-1)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> Categorical:
return Categorical(scalars, dtype=dtype, copy=copy)
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
@overload
def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
...
@overload
def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
dtype = pandas_dtype(dtype)
if self.dtype is dtype:
result = self.copy() if copy else self
elif is_categorical_dtype(dtype):
dtype = cast(CategoricalDtype, dtype)
# GH 10696/18593/18630
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
result = self._set_dtype(dtype)
elif isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy=copy)
elif is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
elif len(self.codes) == 0 or len(self.categories) == 0:
result = np.array(
self,
dtype=dtype,
copy=copy,
)
else:
# GH8628 (PERF): astype category codes instead of astyping array
new_cats = self.categories._values
try:
new_cats = new_cats.astype(dtype=dtype, copy=copy)
fill_value = self.categories._na_value
if not is_valid_na_for_dtype(fill_value, dtype):
fill_value = lib.item_from_zerodim(
np.array(self.categories._na_value).astype(dtype)
)
except (
TypeError, # downstream error msg for CategoricalIndex is misleading
ValueError,
):
msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}"
raise ValueError(msg)
result = take_nd(
new_cats, ensure_platform_int(self._codes), fill_value=fill_value
)
return result
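# Hedged illustration of the astype branches above (values assumed, not taken
# from the pandas docs or tests):
#   pd.Categorical(["a", "b", "a"]).astype(object)
#       -> array(['a', 'b', 'a'], dtype=object)
#   pd.Categorical([1, 2, np.nan]).astype("int64")
#       -> ValueError: Cannot convert float NaN to integer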
def to_list(self):
"""
Alias for tolist.
"""
return self.tolist()
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import (
Index,
to_datetime,
to_numeric,
to_timedelta,
)
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if is_any_real_numeric_dtype(dtype.categories):
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif is_bool_dtype(dtype.categories):
if true_values is None:
true_values = ["True", "TRUE", "true"]
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Index")
cats = cats.isin(true_values) # type: ignore[assignment]
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(
cls, codes, categories=None, ordered=None, dtype: Dtype | None = None
) -> Categorical:
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
['a', 'b', 'a', 'b']
Categories (2, object): ['a' < 'b']
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
# ------------------------------------------------------------------
# Categories/Codes/Ordered
@property
def categories(self) -> Index:
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories does not equal the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath: bool = False) -> None:
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
['a', 'b']
Categories (2, object): ['a', 'b']
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
['a', 'c']
Categories (2, object): ['a', 'c']
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
super().__init__(self._ndarray, new_dtype)
def _set_dtype(self, dtype: CategoricalDtype) -> Categorical:
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value: bool) -> Categorical:
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
"""
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self.copy()
NDArrayBacked.__init__(cat, cat._ndarray, new_dtype)
return cat
def as_ordered(self) -> Categorical:
"""
Set the Categorical to be ordered.
Returns
-------
Categorical
Ordered Categorical.
"""
return self.set_ordered(True)
def as_unordered(self) -> Categorical:
"""
Set the Categorical to be unordered.
Returns
-------
Categorical
Unordered Categorical.
"""
return self.set_ordered(False)
def set_categories(self, new_categories, ordered=None, rename: bool = False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not run any checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes that do not consider an S1 string equal to a single-character
Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
Returns
-------
Categorical with reordered categories.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
codes = cat._codes
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
NDArrayBacked.__init__(cat, codes, new_dtype)
return cat
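# Hedged example of the two modes described above (values assumed):
#   c = pd.Categorical(["a", "b", "a"])
#   c.set_categories(["b", "c"])
#       -> [NaN, 'b', NaN], Categories (2, object): ['b', 'c']
#   c.set_categories(["x", "y"], rename=True)
#       -> ['x', 'y', 'x'], Categories (2, object): ['x', 'y']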
def rename_categories(self, new_categories) -> Categorical:
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
Returns
-------
Categorical
Categorical with renamed categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
['A', 'A', 'b']
Categories (2, object): ['A', 'b']
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
['A', 'A', 'B']
Categories (2, object): ['A', 'B']
"""
if is_dict_like(new_categories):
new_categories = [
new_categories.get(item, item) for item in self.categories
]
elif callable(new_categories):
new_categories = [new_categories(item) for item in self.categories]
cat = self.copy()
cat._set_categories(new_categories)
return cat
def reorder_categories(self, new_categories, ordered=None):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
Returns
-------
Categorical
Categorical with reordered categories.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
if (
len(self.categories) != len(new_categories)
or not self.categories.difference(new_categories).empty
):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered)
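# Hedged example (values assumed): reordering only changes the category order,
# not the values:
#   pd.Categorical(["a", "b", "a"]).reorder_categories(["b", "a"], ordered=True)
#       -> ['a', 'b', 'a'], Categories (2, object): ['b' < 'a']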
def add_categories(self, new_categories) -> Categorical:
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
Returns
-------
Categorical
Categorical with new categories added.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['c', 'b', 'c'])
>>> c
['c', 'b', 'c']
Categories (2, object): ['b', 'c']
>>> c.add_categories(['d', 'a'])
['c', 'b', 'c']
Categories (4, object): ['b', 'c', 'd', 'a']
"""
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
if hasattr(new_categories, "dtype"):
from pandas import Series
dtype = find_common_type(
[self.dtype.categories.dtype, new_categories.dtype]
)
new_categories = Series(
list(self.dtype.categories) + list(new_categories), dtype=dtype
)
else:
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self.copy()
codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories)
NDArrayBacked.__init__(cat, codes, new_dtype)
return cat
def remove_categories(self, removals):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
Returns
-------
Categorical
Categorical with removed categories.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])
>>> c
['a', 'c', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> c.remove_categories(['d', 'a'])
[NaN, 'c', 'b', 'c', NaN]
Categories (2, object): ['b', 'c']
"""
from pandas import Index
if not is_list_like(removals):
removals = [removals]
removals = Index(removals).unique().dropna()
new_categories = self.dtype.categories.difference(removals)
not_included = removals.difference(self.dtype.categories)
if len(not_included) != 0:
not_included = set(not_included)
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(new_categories, ordered=self.ordered, rename=False)
def remove_unused_categories(self) -> Categorical:
"""
Remove categories which are not used.
Returns
-------
Categorical
Categorical with unused categories dropped.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])
>>> c
['a', 'c', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> c[2] = 'a'
>>> c[4] = 'c'
>>> c
['a', 'c', 'a', 'c', 'c']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> c.remove_unused_categories()
['a', 'c', 'a', 'c', 'c']
Categories (2, object): ['a', 'c']
"""
idx, inv = np.unique(self._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = self.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
new_codes = coerce_indexer_dtype(inv, new_dtype.categories)
cat = self.copy()
NDArrayBacked.__init__(cat, new_codes, new_dtype)
return cat
# ------------------------------------------------------------------
def map(self, mapper):
"""
Map categories using an input mapping or function.
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> cat.map(lambda x: x.upper())
['A', 'B', 'C']
Categories (3, object): ['A', 'B', 'C']
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
['first', 'second', 'third']
Categories (3, object): ['first', 'second', 'third']
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
['a', 'b', 'c']
Categories (3, object): ['a' < 'b' < 'c']
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# -------------------------------------------------------------
# Validators; ideally these can be de-duplicated
def _validate_setitem_value(self, value):
if not is_hashable(value):
# wrap scalars and hashable-listlikes in list
return self._validate_listlike(value)
else:
return self._validate_scalar(value)
def _validate_scalar(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising TypeError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
TypeError
"""
if is_valid_na_for_dtype(fill_value, self.categories.dtype):
fill_value = -1
elif fill_value in self.categories:
fill_value = self._unbox_scalar(fill_value)
else:
raise TypeError(
"Cannot setitem on a Categorical with a new "
f"category ({fill_value}), set the categories first"
) from None
return fill_value
# -------------------------------------------------------------
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_nd(self.categories._values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
return np.asarray(ret)
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if "out" in kwargs:
# e.g. test_numpy_ufuncs_out
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
if method == "reduce":
# e.g. TestCategoricalAnalytics::test_min_max_ordered
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state) -> None:
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
return super().__setstate__(state)
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
if "_codes" in state and "_ndarray" not in state:
# backward compat, changed what is property vs attribute
state["_ndarray"] = state.pop("_codes")
super().__setstate__(state)
@property
def nbytes(self) -> int:
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
def isna(self) -> np.ndarray:
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
np.ndarray[bool] of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
"""
return self._codes == -1
isnull = isna
def notna(self) -> np.ndarray:
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
np.ndarray[bool] of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def value_counts(self, dropna: bool = True) -> Series:
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import (
CategoricalIndex,
Series,
)
code, cat = self._codes, self.categories
ncat, mask = (len(cat), code >= 0)
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = coerce_indexer_dtype(ix, self.dtype.categories)
ix = self._from_backing_data(ix)
return Series(
count, index=CategoricalIndex(ix), dtype="int64", name="count", copy=False
)
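# Hedged example (values assumed): every category gets an entry, even with a
# count of zero:
#   pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"]).value_counts()
#       -> a 2, b 1, c 0  (an int64 Series indexed by a CategoricalIndex)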
# error: Argument 2 of "_empty" is incompatible with supertype
# "NDArrayBackedExtensionArray"; supertype defines the argument type as
# "ExtensionDtype"
@classmethod
def _empty(  # type: ignore[override]
cls: type_t[Categorical], shape: Shape, dtype: CategoricalDtype
) -> Categorical:
"""
Analogous to np.empty(shape, dtype=dtype)
Parameters
----------
shape : tuple[int]
dtype : CategoricalDtype
"""
arr = cls._from_sequence([], dtype=dtype)
# We have to use np.zeros instead of np.empty otherwise the resulting
# ndarray may contain codes not supported by this dtype, in which
# case repr(result) could segfault.
backing = np.zeros(shape, dtype=arr._ndarray.dtype)
return arr._from_backing_data(backing)
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories.dtype):
return self.categories.take(self._codes, fill_value=NaT)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op) -> None:
"""assert that we are ordered"""
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def argsort(
self, *, ascending: bool = True, kind: SortKind = "quicksort", **kwargs
):
"""
Return the indices that would sort the Categorical.
Missing values are sorted at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
@overload
def sort_values(
self,
*,
inplace: Literal[False] = ...,
ascending: bool = ...,
na_position: str = ...,
) -> Categorical:
...
@overload
def sort_values(
self, *, inplace: Literal[True], ascending: bool = ..., na_position: str = ...
) -> None:
...
def sort_values(
self,
*,
inplace: bool = False,
ascending: bool = True,
na_position: str = "last",
) -> Categorical | None:
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if not inplace:
codes = self._codes[sorted_idx]
return self._from_backing_data(codes)
self._codes[:] = self._codes[sorted_idx]
return None
def _rank(
self,
*,
axis: AxisInt = 0,
method: str = "average",
na_option: str = "keep",
ascending: bool = True,
pct: bool = False,
):
"""
See Series.rank.__doc__.
"""
if axis != 0:
raise NotImplementedError
vff = self._values_for_rank()
return algorithms.rank(
vff,
axis=axis,
method=method,
na_option=na_option,
ascending=ascending,
pct=pct,
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif is_any_real_numeric_dtype(self.categories):
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(
Series(self.categories, copy=False).rank().values
)
)
return values
# ------------------------------------------------------------------
# NDArrayBackedExtensionArray compat
@property
def _codes(self) -> np.ndarray:
return self._ndarray
def _box_func(self, i: int):
if i == -1:
return np.NaN
return self.categories[i]
def _unbox_scalar(self, key) -> int:
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
code = self.categories.get_loc(key)
code = self._ndarray.dtype.type(code)
return code
# ------------------------------------------------------------------
def __iter__(self) -> Iterator:
"""
Returns an Iterator over the values of this Categorical.
"""
if self.ndim == 1:
return iter(self._internal_get_values().tolist())
else:
return (self[n] for n in range(len(self)))
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if any NaN is in self.
if is_valid_na_for_dtype(key, self.categories.dtype):
return bool(self.isna().any())
return contains(self, key, container=self._codes)
# ------------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed: bool = False):
# Defer to CategoricalFormatter's formatter.
return None
def _tidy_repr(self, max_vals: int = 10, footer: bool = True) -> str:
"""
a short repr displaying only max_vals and an optional (but default)
footer
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = f"{head[:-1]}, ..., {tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self) -> list[str]:
"""
return the base repr for the categories
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
from pandas.io.formats import format as fmt
format_array = partial(
fmt.format_array, formatter=None, quoting=QUOTE_NONNUMERIC
)
if len(self.categories) > max_categories:
num = max_categories // 2
head = format_array(self.categories[:num])
tail = format_array(self.categories[-num:])
category_strs = head + ["..."] + tail
else:
category_strs = format_array(self.categories)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader = f"Categories ({len(self.categories)}, {dtype}): "
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = f"{sep.rstrip()}\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace ' < ... < ' with ' ... ' to save space
return f"{levheader}[{levstring.replace(' < ... < ', ' ... ')}]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {len(self)}\n{info}"
def _get_repr(
self, length: bool = True, na_rep: str = "NaN", footer: bool = True
) -> str:
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(
self, length=length, na_rep=na_rep, footer=footer
)
result = formatter.to_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = f"[], {msg}"
return result
# ------------------------------------------------------------------
def _validate_listlike(self, value):
# NB: here we assume scalar-like tuples have already been excluded
value = extract_array(value, extract_numpy=True)
# require identical categories set
if isinstance(value, Categorical):
if not is_dtype_equal(self.dtype, value.dtype):
raise TypeError(
"Cannot set a Categorical with another, "
"without identical categories"
)
# is_dtype_equal implies categories_match_up_to_permutation
value = self._encode_with_my_categories(value)
return value._codes
from pandas import Index
# tupleize_cols=False for e.g. test_fillna_iterable_category GH#41914
to_add = Index._with_infer(value, tupleize_cols=False).difference(
self.categories
)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise TypeError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
codes = self.categories.get_indexer(value)
return codes.astype(self._ndarray.dtype, copy=False)
def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
Dict[Hashable, np.ndarray[np.intp]]
dict of categories -> indexers
Examples
--------
>>> c = pd.Categorical(list('aabca'))
>>> c
['a', 'a', 'b', 'c', 'a']
Categories (3, object): ['a', 'b', 'c']
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(
ensure_platform_int(self.codes), categories.size
)
counts = ensure_int64(counts).cumsum()
_result = (r[start:end] for start, end in zip(counts, counts[1:]))
return dict(zip(categories, _result))
# ------------------------------------------------------------------
# Reductions
def min(self, *, skipna: bool = True, **kwargs):
"""
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`, NA value if empty
"""
nv.validate_minmax_axis(kwargs.get("axis", 0))
nv.validate_min((), kwargs)
self.check_for_ordered("min")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].min()
else:
return np.nan
else:
pointer = self._codes.min()
return self._wrap_reduction_result(None, pointer)
def max(self, *, skipna: bool = True, **kwargs):
"""
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`, NA if array is empty
"""
nv.validate_minmax_axis(kwargs.get("axis", 0))
nv.validate_max((), kwargs)
self.check_for_ordered("max")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].max()
else:
return np.nan
else:
pointer = self._codes.max()
return self._wrap_reduction_result(None, pointer)
def _mode(self, dropna: bool = True) -> Categorical:
codes = self._codes
mask = None
if dropna:
mask = self.isna()
res_codes = algorithms.mode(codes, mask=mask)
res_codes = cast(np.ndarray, res_codes)
assert res_codes.dtype == codes.dtype
res = self._from_backing_data(res_codes)
return res
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique.
.. versionchanged:: 1.3.0
Previously, unused categories were dropped from the new categories.
Returns
-------
Categorical
See Also
--------
pandas.unique
CategoricalIndex.unique
Series.unique : Return unique values of Series object.
Examples
--------
>>> pd.Categorical(list("baabc")).unique()
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> pd.Categorical(list("baab"), categories=list("abc"), ordered=True).unique()
['b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
"""
# pylint: disable=useless-parent-delegation
return super().unique()
def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray:
# make sure we have correct itemsize for resulting codes
assert res_values.dtype == self._ndarray.dtype
return res_values
def equals(self, other: object) -> bool:
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
bool
"""
if not isinstance(other, Categorical):
return False
elif self._categories_match_up_to_permutation(other):
other = self._encode_with_my_categories(other)
return np.array_equal(self._codes, other._codes)
return False
def _concat_same_type(
cls: type[CategoricalT], to_concat: Sequence[CategoricalT], axis: AxisInt = 0
) -> CategoricalT:
from pandas.core.dtypes.concat import union_categoricals
first = to_concat[0]
if axis >= first.ndim:
raise ValueError(
f"axis {axis} is out of bounds for array of dimension {first.ndim}"
)
if axis == 1:
# Flatten, concatenate then reshape
if not all(x.ndim == 2 for x in to_concat):
raise ValueError
# pass correctly-shaped to union_categoricals
tc_flat = []
for obj in to_concat:
tc_flat.extend([obj[:, i] for i in range(obj.shape[1])])
res_flat = cls._concat_same_type(tc_flat, axis=0)
result = res_flat.reshape(len(first), -1, order="F")
return result
result = union_categoricals(to_concat)
return result
# ------------------------------------------------------------------
def _encode_with_my_categories(self, other: Categorical) -> Categorical:
"""
Re-encode another categorical using this Categorical's categories.
Notes
-----
This assumes we have already checked
self._categories_match_up_to_permutation(other).
"""
# Indexing on codes is more efficient if categories are the same,
# so we can apply some optimizations based on the degree of
# dtype-matching.
codes = recode_for_categories(
other.codes, other.categories, self.categories, copy=False
)
return self._from_backing_data(codes)
def _categories_match_up_to_permutation(self, other: Categorical) -> bool:
"""
Returns True if the categoricals have the same dtype, i.e.
the same categories and the same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
bool
"""
return hash(self.dtype) == hash(other.dtype)
def describe(self) -> DataFrame:
"""
Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / counts.sum()
from pandas import Index
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = Index(["counts", "freqs"])
result.index.name = "categories"
return result
def isin(self, values) -> npt.NDArray[np.bool_]:
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
np.ndarray[bool]
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : Equivalent method on Series.
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
if not is_list_like(values):
values_type = type(values).__name__
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{values_type}]"
)
values = sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
def _replace(self, *, to_replace, value, inplace: bool = False):
from pandas import Index
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
mask = isna(np.asarray(value))
if mask.any():
removals = np.asarray(to_replace)[mask]
removals = cat.categories[cat.categories.isin(removals)]
new_cat = cat.remove_categories(removals)
NDArrayBacked.__init__(cat, new_cat.codes, new_cat.dtype)
ser = cat.categories.to_series()
ser = ser.replace(to_replace=to_replace, value=value)
all_values = Index(ser)
# GH51016: maintain order of existing categories
idxr = cat.categories.get_indexer_for(all_values)
locs = np.arange(len(ser))
locs = np.where(idxr == -1, locs, idxr)
locs = locs.argsort()
new_categories = ser.take(locs)
new_categories = new_categories.drop_duplicates(keep="first")
new_categories = Index(new_categories)
new_codes = recode_for_categories(
cat._codes, all_values, new_categories, copy=False
)
new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
NDArrayBacked.__init__(cat, new_codes, new_dtype)
if not inplace:
return cat
# ------------------------------------------------------------------------
# String methods interface
def _str_map(
self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True
):
# Optimization to apply the callable `f` to the categories once
# and rebuild the result by `take`ing from the result with the codes.
# Returns the same type as the object-dtype implementation though.
from pandas.core.arrays import PandasArray
categories = self.categories
codes = self.codes
result = PandasArray(categories.to_numpy())._str_map(f, na_value, dtype)
return take_nd(result, codes, fill_value=na_value)
def _str_get_dummies(self, sep: str = "|"):
# sep may not be in categories. Just bail on this.
from pandas.core.arrays import PandasArray
return PandasArray(self.astype(str))._str_get_dummies(sep)
def recode_for_categories(
codes: np.ndarray, old_categories, new_categories, copy: bool = True
) -> np.ndarray:
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : np.ndarray
old_categories, new_categories : Index
copy: bool, default True
Whether to copy if the codes are unchanged.
Returns
-------
new_codes : np.ndarray[np.int64]
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1], dtype=int8)
"""
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
if copy:
return codes.copy()
return codes
elif new_categories.equals(old_categories):
# Same categories, so no need to actually recode
if copy:
return codes.copy()
return codes
indexer = coerce_indexer_dtype(
new_categories.get_indexer(old_categories), new_categories
)
new_codes = take_nd(indexer, codes, fill_value=-1)
return new_codes
def needs_i8_conversion(arr_or_dtype) -> bool:
"""
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
or dtype is of a datetime-like dtype
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
"""
if arr_or_dtype is None:
return False
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.kind in ["m", "M"]
elif isinstance(arr_or_dtype, ExtensionDtype):
return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype))
try:
dtype = get_dtype(arr_or_dtype)
except (TypeError, ValueError):
return False
if isinstance(dtype, np.dtype):
return dtype.kind in ["m", "M"]
return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))
def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]:
"""
Boilerplate for pandas conventions in arithmetic and comparison methods.
Parameters
----------
name : str
Returns
-------
decorator
"""
def wrapper(method: F) -> F:
return _unpack_zerodim_and_defer(method, name)
return wrapper
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
fill_value = op is operator.ne
@unpack_zerodim_and_defer(opname)
def func(self, other):
hashable = is_hashable(other)
if is_list_like(other) and len(other) != len(self) and not hashable:
# in hashable case we may have a tuple that is itself a category
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if not self._categories_match_up_to_permutation(other):
raise TypeError(msg)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = recode_for_categories(
other.codes, other.categories, self.categories, copy=False
)
else:
other_codes = other._codes
ret = op(self._codes, other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
ret[mask] = fill_value
return ret
if hashable:
if other in self.categories:
i = self._unbox_scalar(other)
ret = op(self._codes, i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# GH#29820 performance trick; get_loc will always give i>=0,
# so in the cases (__ne__, __le__, __lt__) the setting
# here is a no-op, so can be skipped.
mask = self._codes == -1
ret[mask] = fill_value
return ret
else:
return ops.invalid_comparison(self, other, op)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname not in ["__eq__", "__ne__"]:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype):
# We would return NotImplemented here, but that messes up
# ExtensionIndex's wrapped methods
return op(other, self)
return getattr(np.array(self), opname)(np.array(other))
func.__name__ = opname
return func | null |
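The comparison rules encoded by `_cat_compare_op` above can be observed through the public API; a small illustrative sketch (the example values are made up):
import pandas as pd
# Ordered categoricals support the full set of comparisons; codes are
# compared positionally (recoded first if the category order differs).
a = pd.Categorical(["a", "b"], categories=["a", "b"], ordered=True)
b = pd.Categorical(["b", "a"], categories=["a", "b"], ordered=True)
print(a < b)   # [ True False]
print(a == b)  # [False False]
# Unordered categoricals only allow equality/inequality; `u < u` would raise
# TypeError("Unordered Categoricals can only compare equality or not").
u = pd.Categorical(["a", "b"])
print(u != u)  # [False False]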
173,276 | from __future__ import annotations
from csv import QUOTE_NONNUMERIC
from functools import partial
import operator
from shutil import get_terminal_size
from typing import (
TYPE_CHECKING,
Hashable,
Iterator,
Literal,
Sequence,
TypeVar,
cast,
overload,
)
import numpy as np
from pandas._config import get_option
from pandas._libs import (
NaT,
algos as libalgos,
lib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._typing import (
ArrayLike,
AstypeArg,
AxisInt,
Dtype,
NpDtype,
Ordered,
Shape,
SortKind,
npt,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
find_common_type,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_any_real_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_hashable,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
ExtensionDtype,
)
from pandas.core.dtypes.generic import (
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
)
from pandas.core import (
algorithms,
arraylike,
ops,
)
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.algorithms import (
factorize,
take_nd,
)
from pandas.core.arrays._mixins import (
NDArrayBackedExtensionArray,
ravel_compat,
)
from pandas.core.base import (
ExtensionArray,
NoNewAttributesMixin,
PandasObject,
)
import pandas.core.common as com
from pandas.core.construction import (
extract_array,
sanitize_array,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.core.strings.object_array import ObjectStringArrayMixin
from pandas.io.formats import console
The provided code snippet includes necessary dependencies for implementing the `contains` function. Write a Python function `def contains(cat, key, container) -> bool` to solve the following problem:
Helper for membership check for ``key`` in ``cat``. This is a helper method for :method:`__contains__` and :class:`CategoricalIndex.__contains__`. Returns True if ``key`` is in ``cat.categories`` and the location of ``key`` in ``categories`` is in ``container``. Parameters ---------- cat : :class:`Categorical`or :class:`categoricalIndex` key : a hashable object The key to check membership for. container : Container (e.g. list-like or mapping) The container to check for membership in. Returns ------- is_in : bool True if ``key`` is in ``self.categories`` and location of ``key`` in ``categories`` is in ``container``, else False. Notes ----- This method does not check for NaN values. Do that separately before calling this method.
Here is the function:
def contains(cat, key, container) -> bool:
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc) | Helper for membership check for ``key`` in ``cat``. This is a helper method for :method:`__contains__` and :class:`CategoricalIndex.__contains__`. Returns True if ``key`` is in ``cat.categories`` and the location of ``key`` in ``categories`` is in ``container``. Parameters ---------- cat : :class:`Categorical`or :class:`categoricalIndex` key : a hashable object The key to check membership for. container : Container (e.g. list-like or mapping) The container to check for membership in. Returns ------- is_in : bool True if ``key`` is in ``self.categories`` and location of ``key`` in ``categories`` is in ``container``, else False. Notes ----- This method does not check for NaN values. Do that separately before calling this method. |
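The membership rule this helper implements is visible through ``Categorical.__contains__``; a short illustrative sketch mirroring the comment in the function body:
import pandas as pd
# 'b' is a declared category but its code never occurs in the values,
# so membership is False; 'a' is a category and actually appears.
cat = pd.Categorical(["a"], categories=["a", "b"])
print("a" in cat)  # True
print("b" in cat)  # False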
173,277 | from __future__ import annotations
from csv import QUOTE_NONNUMERIC
from functools import partial
import operator
from shutil import get_terminal_size
from typing import (
TYPE_CHECKING,
Hashable,
Iterator,
Literal,
Sequence,
TypeVar,
cast,
overload,
)
import numpy as np
from pandas._config import get_option
from pandas._libs import (
NaT,
algos as libalgos,
lib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._typing import (
ArrayLike,
AstypeArg,
AxisInt,
Dtype,
NpDtype,
Ordered,
Shape,
SortKind,
npt,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
find_common_type,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_any_real_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_hashable,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
ExtensionDtype,
)
from pandas.core.dtypes.generic import (
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
)
from pandas.core import (
algorithms,
arraylike,
ops,
)
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.algorithms import (
factorize,
take_nd,
)
from pandas.core.arrays._mixins import (
NDArrayBackedExtensionArray,
ravel_compat,
)
from pandas.core.base import (
ExtensionArray,
NoNewAttributesMixin,
PandasObject,
)
import pandas.core.common as com
from pandas.core.construction import (
extract_array,
sanitize_array,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.core.strings.object_array import ObjectStringArrayMixin
from pandas.io.formats import console
def coerce_indexer_dtype(indexer, categories) -> np.ndarray:
"""coerce the indexer input array to the smallest dtype possible"""
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
The provided code snippet includes necessary dependencies for implementing the `_get_codes_for_values` function. Write a Python function `def _get_codes_for_values(values, categories: Index) -> np.ndarray` to solve the following problem:
utility routine to turn values into codes given the specified categories If `values` is known to be a Categorical, use recode_for_categories instead.
Here is the function:
def _get_codes_for_values(values, categories: Index) -> np.ndarray:
"""
utility routine to turn values into codes given the specified categories
If `values` is known to be a Categorical, use recode_for_categories instead.
"""
if values.ndim > 1:
flat = values.ravel()
codes = _get_codes_for_values(flat, categories)
return codes.reshape(values.shape)
codes = categories.get_indexer_for(values)
return coerce_indexer_dtype(codes, categories) | utility routine to turn values into codes given the specified categories If `values` is known to be a Categorical, use recode_for_categories instead. |
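The mapping performed above can be reproduced with public ``Index`` methods; a minimal sketch with made-up values:
import numpy as np
import pandas as pd
# Each value is mapped to its position in `categories`; values not found
# get code -1, which a Categorical later treats as missing.
categories = pd.Index(["a", "b", "c"])
values = np.array(["b", "x", "a"], dtype=object)
print(categories.get_indexer_for(values))  # [ 1 -1  0]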
173,278 | from __future__ import annotations
import numbers
from typing import (
TYPE_CHECKING,
cast,
)
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
Dtype,
DtypeObj,
type_t,
)
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.array_algos import masked_accumulations
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
# Fill values used for any/all
# Incompatible types in assignment (expression has type "bool", base class
# "BaseMaskedArray" defined the type as "<typing special form>")
_truthy_value = True # type: ignore[assignment]
_falsey_value = False # type: ignore[assignment]
_TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
_FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
def __init__(
self, values: np.ndarray, mask: np.ndarray, copy: bool = False
) -> None:
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
def dtype(self) -> BooleanDtype:
return self._dtype
def _from_sequence_of_strings(
cls,
strings: list[str],
*,
dtype: Dtype | None = None,
copy: bool = False,
true_values: list[str] | None = None,
false_values: list[str] | None = None,
) -> BooleanArray:
true_values_union = cls._TRUE_VALUES.union(true_values or [])
false_values_union = cls._FALSE_VALUES.union(false_values or [])
def map_string(s) -> bool:
if s in true_values_union:
return True
elif s in false_values_union:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = np.array(strings, dtype=object)
mask = isna(scalars)
scalars[~mask] = list(map(map_string, scalars[~mask]))
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
def _coerce_to_array(
cls, value, *, dtype: DtypeObj, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
if dtype:
assert dtype == "boolean"
return coerce_to_array(value, copy=copy)
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_scalar = lib.is_scalar(other)
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
else:
# i.e. xor, rxor
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
# i.e. BooleanArray
return self._maybe_mask_result(result, mask)
def _accumulate(
self, name: str, *, skipna: bool = True, **kwargs
) -> BaseMaskedArray:
data = self._data
mask = self._mask
if name in ("cummin", "cummax"):
op = getattr(masked_accumulations, name)
data, mask = op(data, mask, skipna=skipna, **kwargs)
return type(self)(data, mask, copy=False)
else:
from pandas.core.arrays import IntegerArray
return IntegerArray(data.astype(int), mask)._accumulate(
name, skipna=skipna, **kwargs
)
def cast(typ: Type[_T], val: Any) -> _T: ...
def cast(typ: str, val: Any) -> Any: ...
def cast(typ: object, val: Any) -> Any: ...
def is_numeric_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a numeric dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a numeric dtype.
Examples
--------
>>> from pandas.api.types import is_numeric_dtype
>>> is_numeric_dtype(str)
False
>>> is_numeric_dtype(int)
True
>>> is_numeric_dtype(float)
True
>>> is_numeric_dtype(np.uint64)
True
>>> is_numeric_dtype(np.datetime64)
False
>>> is_numeric_dtype(np.timedelta64)
False
>>> is_numeric_dtype(np.array(['a', 'b']))
False
>>> is_numeric_dtype(pd.Series([1, 2]))
True
>>> is_numeric_dtype(pd.Index([1, 2.]))
True
>>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
False
"""
return _is_dtype_type(
arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_)
) or _is_dtype(
arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric
)
def isna(obj: Scalar) -> bool:
...
def isna(
obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
...
def isna(obj: NDFrameT) -> NDFrameT:
...
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
The provided code snippet includes necessary dependencies for implementing the `coerce_to_array` function. Write a Python function `def coerce_to_array( values, mask=None, copy: bool = False ) -> tuple[np.ndarray, np.ndarray]` to solve the following problem:
Coerce the input values array to numpy arrays with a mask. Parameters ---------- values : 1D list-like mask : bool 1D array, optional copy : bool, default False if True, copy the input Returns ------- tuple of (values, mask)
Here is the function:
def coerce_to_array(
values, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
if isinstance(values, BooleanArray):
if mask is not None:
raise ValueError("cannot pass mask for BooleanArray input")
values, mask = values._data, values._mask
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
mask_values = None
if isinstance(values, np.ndarray) and values.dtype == np.bool_:
if copy:
values = values.copy()
elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
mask_values = isna(values)
values_bool = np.zeros(len(values), dtype=bool)
values_bool[~mask_values] = values[~mask_values].astype(bool)
if not np.all(
values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
):
raise TypeError("Need to pass bool-like values")
values = values_bool
else:
values_object = np.asarray(values, dtype=object)
inferred_dtype = lib.infer_dtype(values_object, skipna=True)
integer_like = ("floating", "integer", "mixed-integer-float")
if inferred_dtype not in ("boolean", "empty") + integer_like:
raise TypeError("Need to pass bool-like values")
# mypy does not narrow the type of mask_values to npt.NDArray[np.bool_]
# within this branch, it assumes it can also be None
mask_values = cast("npt.NDArray[np.bool_]", isna(values_object))
values = np.zeros(len(values), dtype=bool)
values[~mask_values] = values_object[~mask_values].astype(bool)
# if the values were integer-like, validate it were actually 0/1's
if (inferred_dtype in integer_like) and not (
np.all(
values[~mask_values].astype(float)
== values_object[~mask_values].astype(float)
)
):
raise TypeError("Need to pass bool-like values")
if mask is None and mask_values is None:
mask = np.zeros(values.shape, dtype=bool)
elif mask is None:
mask = mask_values
else:
if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
if mask_values is not None:
mask = mask | mask_values
else:
if copy:
mask = mask.copy()
else:
mask = np.array(mask, dtype=bool)
if mask_values is not None:
mask = mask | mask_values
if values.shape != mask.shape:
raise ValueError("values.shape and mask.shape must match")
return values, mask | Coerce the input values array to numpy arrays with a mask. Parameters ---------- values : 1D list-like mask : bool 1D array, optional copy : bool, default False if True, copy the input Returns ------- tuple of (values, mask) |
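The values/mask pair that ``coerce_to_array`` returns is what backs a ``BooleanArray``; a brief sketch through the public constructor (``_data`` and ``_mask`` are internal attributes, inspected here only for illustration):
import pandas as pd
# Integer-like input is accepted as long as it contains only 0/1; missing
# entries are recorded in the mask rather than in the data array.
arr = pd.array([1, 0, None], dtype="boolean")
print(arr)        # BooleanArray: [True, False, <NA>]
print(arr._data)  # [ True False False]
print(arr._mask)  # [False False  True]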
173,279 | from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from functools import wraps
import operator
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterator,
Literal,
Sequence,
TypeVar,
Union,
cast,
final,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
algos,
lib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
NaTType,
Period,
Resolution,
Tick,
Timedelta,
Timestamp,
astype_overflowsafe,
delta_to_nanoseconds,
get_unit_from_dtype,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
to_offset,
)
from pandas._libs.tslibs.fields import (
RoundTo,
round_nsint64,
)
from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
from pandas._libs.tslibs.timestamps import integer_op_not_supported
from pandas._typing import (
ArrayLike,
AxisInt,
DatetimeLikeScalar,
Dtype,
DtypeObj,
F,
NpDtype,
PositionalIndexer2D,
PositionalIndexerTuple,
ScalarIndexer,
SequenceIndexer,
TimeAmbiguous,
TimeNonexistent,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
AbstractMethodError,
InvalidComparison,
PerformanceWarning,
)
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_all_strings,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
)
from pandas.core.dtypes.generic import (
ABCCategorical,
ABCMultiIndex,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
)
from pandas.core import (
algorithms,
nanops,
ops,
)
from pandas.core.algorithms import (
checked_add_with_arr,
isin,
unique1d,
)
from pandas.core.array_algos import datetimelike_accumulations
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import (
NDArrayBackedExtensionArray,
ravel_compat,
)
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import (
array as pd_array,
ensure_wrapped_if_datetimelike,
extract_array,
)
from pandas.core.indexers import (
check_array_indexer,
check_setitem_lengths,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import (
invalid_comparison,
make_invalid_op,
)
from pandas.tseries import frequencies
def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ...
def cast(typ: Type[_T], val: Any) -> _T: ...
def cast(typ: str, val: Any) -> Any: ...
def cast(typ: object, val: Any) -> Any: ...
F = TypeVar("F", bound=FuncType)
def is_period_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the Period dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the Period dtype.
Examples
--------
>>> is_period_dtype(object)
False
>>> is_period_dtype(PeriodDtype(freq="D"))
True
>>> is_period_dtype([1, 2, 3])
False
>>> is_period_dtype(pd.Period("2017-01-01"))
False
>>> is_period_dtype(pd.PeriodIndex([], freq="A"))
True
"""
if isinstance(arr_or_dtype, ExtensionDtype):
# GH#33400 fastpath for dtype object
return arr_or_dtype.type is Period
if arr_or_dtype is None:
return False
return PeriodDtype.is_dtype(arr_or_dtype)
The provided code snippet includes necessary dependencies for implementing the `_period_dispatch` function. Write a Python function `def _period_dispatch(meth: F) -> F` to solve the following problem:
For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results in PeriodArray. We cannot use ._ndarray directly for the affected methods because the i8 data has different semantics on NaT values.
Here is the function:
def _period_dispatch(meth: F) -> F:
"""
For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results
in PeriodArray. We cannot use ._ndarray directly for the affected
methods because the i8 data has different semantics on NaT values.
"""
@wraps(meth)
def new_meth(self, *args, **kwargs):
if not is_period_dtype(self.dtype):
return meth(self, *args, **kwargs)
arr = self.view("M8[ns]")
result = meth(arr, *args, **kwargs)
if result is NaT:
return NaT
elif isinstance(result, Timestamp):
return self._box_func(result._value)
res_i8 = result.view("i8")
return self._from_backing_data(res_i8)
return cast(F, new_meth) | For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results in PeriodArray. We cannot use ._ndarray directly for the affected methods because the i8 data has different semantics on NaT values. |
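A brief note on why the re-wrapping above matters: NaT is stored in the period ordinals (the i8 backing data) as a large negative sentinel, so code operating on the raw ordinals would mistake it for a real, extreme value. A hedged sketch using public-facing attributes:
import pandas as pd
# The NaT slot holds the sentinel in the ordinal data, yet reductions
# still treat it as missing rather than as the smallest value.
pa = pd.PeriodIndex(["2020-01", pd.NaT, "2021-06"], freq="M").array
print(pa.asi8)   # ordinals, with the NaT sentinel in the middle slot
print(pa.min())  # Period('2020-01', 'M')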